/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/olpc_ofw.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/page_types.h>
#include <asm/init.h>

#include "mm_internal.h"

unsigned long highstart_pfn, highend_pfn;

bool __read_mostly __vmalloc_start_set = false;

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *)alloc_low_page();
		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		p4d = p4d_offset(pgd, 0);
		pud = pud_offset(p4d, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));

		return pmd_table;
	}
#endif
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = (pte_t *)alloc_low_page();

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

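/*
 * Make sure the intermediate page-table levels covering @vaddr exist in
 * swapper_pg_dir, and hand back a pointer to the pmd (or pte) entry for
 * that address.
 */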
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	int pgd_idx = pgd_index(vaddr);
	int pmd_idx = pmd_index(vaddr);

	return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	int pte_idx = pte_index(vaddr);
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return one_page_table_init(pmd) + pte_idx;
}

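/*
 * Count how many pte pages page_table_range_init() will have to replace
 * so that they end up physically contiguous: only pmds overlapping the
 * kmap fixmap window may already carry an early pte page and need a
 * replacement (see page_table_kmap_check() below).
 */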
static unsigned long __init
page_table_range_init_count(unsigned long start, unsigned long end)
{
	unsigned long count = 0;
#ifdef CONFIG_HIGHMEM
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	if (pmd_idx_kmap_begin == pmd_idx_kmap_end)
		return 0;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd_idx++) {
			if ((vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin &&
			    (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end)
				count++;
			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
#endif
	return count;
}

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					   unsigned long vaddr, pte_t *lastpte,
					   void **adr)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Something (early fixmap) may already have put a pte
	 * page here, which causes the page table allocation
	 * to become nonlinear. Attempt to fix it, and if it
	 * is still nonlinear then we have to BUG().
	 */
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) {
		pte_t *newpte;
		int i;

		BUG_ON(after_bootmem);
		newpte = *adr;
		for (i = 0; i < PTRS_PER_PTE; i++)
			set_pte(newpte + i, pte[i]);
		*adr = (void *)(((unsigned long)(*adr)) + PAGE_SIZE);

		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
		__flush_tlb_all();

		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
		pte = newpte;
	}
	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
	       && vaddr > fix_to_virt(FIX_KMAP_END)
	       && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
	return pte;
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;
	unsigned long count = page_table_range_init_count(start, end);
	void *adr = NULL;

	if (count)
		adr = alloc_low_pages(count);

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd++, pmd_idx++) {
			pte = page_table_kmap_check(one_page_table_init(pmd),
						    pmd, vaddr, pte, &adr);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

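/*
 * Note: this covers everything from _text up to __init_end, i.e. init
 * text included, so init code keeps execute permission while these
 * mappings are built.
 */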
static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask)
{
	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
	unsigned long last_map_addr = end;
	unsigned long start_pfn, end_pfn;
	pgd_t *pgd_base = swapper_pg_dir;
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pages_2m, pages_4k;
	int mapping_iter;

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	/*
	 * The first iteration will set up the identity mapping using
	 * large/small pages based on use_pse, with the other attributes
	 * the same as set by the early code in head_32.S.
	 *
	 * The second iteration will set up the appropriate attributes
	 * (NX, GLOBAL, ...) as desired for the kernel identity mapping.
	 *
	 * This two-pass mechanism conforms to the TLB app note which says:
	 *
	 *     "Software should not write to a paging-structure entry in a
	 *      way that would change, for any linear address, both the
	 *      page size and either the page frame or attributes."
	 */
	mapping_iter = 1;

	if (!boot_cpu_has(X86_FEATURE_PSE))
		use_pse = 0;

repeat:
	pages_2m = pages_4k = 0;
	pfn = start_pfn;
	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);

		if (pfn >= end_pfn)
			continue;
#ifdef CONFIG_X86_PAE
		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
		pmd += pmd_idx;
#else
		pmd_idx = 0;
#endif
		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (use_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;
				/*
				 * first pass will use the same initial
				 * identity mapping attribute + _PAGE_PSE.
				 */
				pgprot_t init_prot =
					__pgprot(PTE_IDENT_ATTR |
						 _PAGE_PSE);

				pfn &= PMD_MASK >> PAGE_SHIFT;
				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				pages_2m++;
				if (mapping_iter == 1)
					set_pmd(pmd, pfn_pmd(pfn, init_prot));
				else
					set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
			pte += pte_ofs;
			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;
				/*
				 * first pass will use the same initial
				 * identity mapping attribute.
				 */
				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				pages_4k++;
				if (mapping_iter == 1) {
					set_pte(pte, pfn_pte(pfn, init_prot));
					last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
				} else
					set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
	if (mapping_iter == 1) {
		/*
		 * Update the direct mapping page count only in the first
		 * iteration.
		 */
		update_page_count(PG_LEVEL_2M, pages_2m);
		update_page_count(PG_LEVEL_4K, pages_4k);

		/*
		 * Do a global flush of the local TLB, which flushes the
		 * previous mappings present in both the small and large
		 * page TLBs.
		 */
		__flush_tlb_all();

		/*
		 * Second iteration will set the actual desired PTE attributes.
		 */
		mapping_iter = 2;
		goto repeat;
	}
	return last_map_addr;
}

pte_t *kmap_pte;

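/*
 * Walk the kernel page tables down to the pte that covers @vaddr; the
 * p4d/pud levels are folded on 32-bit, so this is just pointer math.
 */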
static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);
	p4d_t *p4d = p4d_offset(pgd, vaddr);
	pud_t *pud = pud_offset(p4d, vaddr);
	pmd_t *pmd = pmd_offset(pud, vaddr);
	return pte_offset_kernel(pmd, vaddr);
}

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	p4d = p4d_offset(pgd, vaddr);
	pud = pud_offset(p4d, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

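/*
 * Hand every usable highmem page in [start_pfn, end_pfn) that memblock
 * knows about over to the buddy allocator.
 */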
void __init add_highpages_with_active_regions(int nid,
			 unsigned long start_pfn, unsigned long end_pfn)
{
	phys_addr_t start, end;
	u64 i;

	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
		unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
					    start_pfn, end_pfn);
		unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
					      start_pfn, end_pfn);
		for ( ; pfn < e_pfn; pfn++)
			if (pfn_valid(pfn))
				free_highmem_page(pfn_to_page(pfn));
	}
}
#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_init(void)
{
	unsigned long pfn, va;
	pgd_t *pgd, *base = swapper_pg_dir;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table.
	 * In virtual address space, there should be at least two guard
	 * pages between VMALLOC_END and the pkmap or fixmap area, per the
	 * definition of VMALLOC_END, and max_low_pfn corresponds to the
	 * physical address at VMALLOC_END. So if the initial memory
	 * mapping did its job, the entries near max_low_pfn are either
	 * 4k ptes or the whole pmd is not present.
	 */
	for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		p4d = p4d_offset(pgd, va);
		pud = pud_offset(p4d, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		/* should not be large page here */
		if (pmd_large(*pmd)) {
			pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
				pfn, pmd, __pa(pmd));
			BUG_ON(1);
		}

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		printk(KERN_DEBUG "clearing pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx pte: %p pte phys: %lx\n",
		       pfn, pmd, __pa(pmd), pte, __pa(pte));
		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
	paging_init();
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_init() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();
}

static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;

	permanent_kmaps_init(pgd_base);
}

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
	if (!arg)
		return -EINVAL;

	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
	return 0;
}
early_param("highmem", parse_highmem);
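/*
 * Example: booting with "highmem=512M" forces exactly 512 MB of highmem
 * (memparse() understands the usual K/M/G suffixes).
 */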

#define MSG_HIGHMEM_TOO_BIG \
	"highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
	"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if the user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
static void __init lowmem_pfn_init(void)
{
	/* max_low_pfn is 0, we already have early_res support */
	max_low_pfn = max_pfn;

	if (highmem_pages == -1)
		highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
	if (highmem_pages >= max_pfn) {
		printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
			pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
		highmem_pages = 0;
	}
	if (highmem_pages) {
		if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
			printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
				pages_to_mb(highmem_pages));
			highmem_pages = 0;
		}
		max_low_pfn -= highmem_pages;
	}
#else
	if (highmem_pages)
		printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}

#define MSG_HIGHMEM_TOO_SMALL \
	"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
	"Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
static void __init highmem_pfn_init(void)
{
	max_low_pfn = MAXMEM_PFN;

	if (highmem_pages == -1)
		highmem_pages = max_pfn - MAXMEM_PFN;

	if (highmem_pages + MAXMEM_PFN < max_pfn)
		max_pfn = MAXMEM_PFN + highmem_pages;

	if (highmem_pages + MAXMEM_PFN > max_pfn) {
		printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
			pages_to_mb(max_pfn - MAXMEM_PFN),
			pages_to_mb(highmem_pages));
		highmem_pages = 0;
	}
#ifndef CONFIG_HIGHMEM
	/* Maximum memory usable is what is directly addressable */
	printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
	if (max_pfn > MAX_NONPAE_PFN)
		printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
	else
		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
	max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
	if (max_pfn > MAX_NONPAE_PFN) {
		max_pfn = MAX_NONPAE_PFN;
		printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
	}
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
	/* it could update max_pfn */

	if (max_pfn <= MAXMEM_PFN)
		lowmem_pfn_init();
	else
		highmem_pfn_init();
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		pages_to_mb(highend_pfn - highstart_pfn));
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
	sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_FLATMEM
	max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn;
#endif
	__vmalloc_start_set = true;

	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(max_low_pfn));

	setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void __init setup_bootmem_allocator(void)
{
	printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
		 max_pfn_mapped<<PAGE_SHIFT);
	printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	pagetable_init();

	__flush_tlb_all();

	kmap_init();

	/*
	 * NOTE: at this point the bootmem allocator is fully available.
	 */
	olpc_dt_build_devicetree();
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	char z = 0;

	printk(KERN_INFO "Checking if this processor honours the WP bit even in supervisor mode...");

	__set_fixmap(FIX_WP_TEST, __pa_symbol(empty_zero_page), PAGE_KERNEL_RO);

	if (probe_kernel_write((char *)fix_to_virt(FIX_WP_TEST), &z, 1)) {
		clear_fixmap(FIX_WP_TEST);
		printk(KERN_CONT "Ok.\n");
		return;
	}

	printk(KERN_CONT "No.\n");
	panic("Linux doesn't support CPUs with broken WP.");
}

void __init mem_init(void)
{
	pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	/*
	 * With CONFIG_DEBUG_PAGEALLOC, the initialization of highmem pages
	 * has to be done before free_all_bootmem(). Memblock uses free low
	 * memory for temporary data (see find_range_array()) and for this
	 * purpose can use pages that were already passed to the buddy
	 * allocator, and hence are marked as not accessible in the page
	 * tables when compiled with CONFIG_DEBUG_PAGEALLOC. Otherwise the
	 * order of initialization is not important here.
	 */
	set_highmem_pages_init();

	/* this will put all low memory onto the freelists */
	free_all_bootmem();

	after_bootmem = 1;

	mem_init_print_info(NULL);
	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
	BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START >= VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);

	test_wp_bit();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones +
		zone_for_memory(nid, start, size, ZONE_HIGHMEM, for_device);
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(nid, zone, start_pfn, nr_pages, !for_device);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;

	zone = page_zone(pfn_to_page(start_pfn));
	return __remove_pages(zone, start_pfn, nr_pages);
}
#endif
#endif

int kernel_set_to_readonly __read_mostly;

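/*
 * Flip the protection on the kernel text mapping; used by the code
 * patching machinery (e.g. ftrace) to make text temporarily writable
 * again once it has been marked read-only.
 */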
void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, start+size);

	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, start+size);

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
}


static void mark_nxdata_nx(void)
{
	/*
	 * When this is called, init has already been executed and released,
	 * so everything past _etext should be NX.
	 */
	unsigned long start = PFN_ALIGN(_etext);
	/*
	 * The upper limit comes from is_kernel_text(); round up to a full
	 * HPAGE_SIZE since large pages may have been used for this range:
	 */
	unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;

	if (__supported_pte_mask & _PAGE_NX)
		printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
	set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);

	kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif

	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
	mark_nxdata_nx();
	if (__supported_pte_mask & _PAGE_NX)
		debug_checkwx();
}