/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>

/*
 * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
 * The direct mapping extends to max_pfn_mapped, so that we can directly access
 * apertures, ACPI and other tables without having to play with fixmaps.
 */
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
                                = 1
#endif
;

static int __init parse_direct_gbpages_off(char *arg)
{
        direct_gbpages = 0;
        return 0;
}
early_param("nogbpages", parse_direct_gbpages_off);

static int __init parse_direct_gbpages_on(char *arg)
{
        direct_gbpages = 1;
        return 0;
}
early_param("gbpages", parse_direct_gbpages_on);

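/*
 * Usage sketch: the two early_param() hooks above register kernel
 * command-line switches, so booting with "gbpages" asks for 1GB direct
 * mappings, while "nogbpages" forces them off even when
 * CONFIG_DIRECT_GBPAGES made them the compile-time default.
 */
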
/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

int after_bootmem;

/*
 * NOTE: This function is marked __ref because it calls the __init function
 * alloc_bootmem_pages(). That is safe ONLY while after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
        void *ptr;

        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);

        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
                panic("set_pte_phys: cannot allocate page data %s\n",
                        after_bootmem ? "after bootmem" : "");
        }

        pr_debug("spp_getpage %p\n", ptr);

        return ptr;
}

void
set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pud = pud_page + pud_index(vaddr);
        if (pud_none(*pud)) {
                pmd = (pmd_t *) spp_getpage();
                pud_populate(&init_mm, pud, pmd);
                if (pmd != pmd_offset(pud, 0)) {
                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
                                pmd, pmd_offset(pud, 0));
                        return;
                }
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                pte = (pte_t *) spp_getpage();
                pmd_populate_kernel(&init_mm, pmd, pte);
                if (pte != pte_offset_kernel(pmd, 0)) {
                        printk(KERN_ERR "PAGETABLE BUG #02!\n");
                        return;
                }
        }

        pte = pte_offset_kernel(pmd, vaddr);
        if (!pte_none(*pte) && pte_val(new_pte) &&
            pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
                pte_ERROR(*pte);
        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

void
set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
        pgd_t *pgd;
        pud_t *pud_page;

        pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_ERR
                        "PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud_page = (pud_t *)pgd_page_vaddr(*pgd);
        set_pte_vaddr_pud(pud_page, vaddr, pteval);
}

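/*
 * Illustrative caller (hypothetical address): the fixmap code installs
 * single 4KB kernel mappings through this path, roughly:
 *
 *      set_pte_vaddr(fix_to_virt(FIX_APIC_BASE),
 *                    pfn_pte(phys >> PAGE_SHIFT, PAGE_KERNEL_NOCACHE));
 */
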
/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
                                                pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
        for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
                pgd = pgd_offset_k((unsigned long)__va(phys));
                if (pgd_none(*pgd)) {
                        pud = (pud_t *) spp_getpage();
                        set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
                                                _PAGE_USER));
                }
                pud = pud_offset(pgd, (unsigned long)__va(phys));
                if (pud_none(*pud)) {
                        pmd = (pmd_t *) spp_getpage();
                        set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
                                                _PAGE_USER));
                }
                pmd = pmd_offset(pud, phys);
                BUG_ON(!pmd_none(*pmd));
                set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
        }
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
}

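/*
 * Usage sketch (hypothetical device range): map 4MB of registers
 * uncached, using 2MB pages:
 *
 *      init_extra_mapping_uc(0xf4000000UL, 4UL << 20);
 *
 * Both helpers require phys and size to be 2MB-aligned; the BUG_ON()
 * in __init_extra_mapping() enforces that.
 */
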
/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end - _text)
 *
 * phys_addr holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _end. _end is
 * rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
        unsigned long vaddr = __START_KERNEL_map;
        unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1;
        pmd_t *pmd = level2_kernel_pgt;
        pmd_t *last_pmd = pmd + PTRS_PER_PMD;

        for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
                if (pmd_none(*pmd))
                        continue;
                if (vaddr < (unsigned long) _text || vaddr > end)
                        set_pmd(pmd, __pmd(0));
        }
}

static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;

static __ref void *alloc_low_page(unsigned long *phys)
{
        unsigned long pfn = table_end++;
        void *adr;

        if (after_bootmem) {
                adr = (void *)get_zeroed_page(GFP_ATOMIC);
                *phys = __pa(adr);

                return adr;
        }

        if (pfn >= table_top)
                panic("alloc_low_page: ran out of memory");

        adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
        *phys = pfn * PAGE_SIZE;
        return adr;
}

static __ref void unmap_low_page(void *adr)
{
        if (after_bootmem)
                return;

        early_iounmap(adr, PAGE_SIZE);
}

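/*
 * Note on the pairing above: before bootmem is up, alloc_low_page()
 * hands back a temporary early_ioremap() window onto the freshly
 * allocated table page, so each early allocation must be released with
 * unmap_low_page() before the limited pool of early-ioremap slots is
 * exhausted. After bootmem, unmap_low_page() is a no-op.
 */
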
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
              pgprot_t prot)
{
        unsigned pages = 0;
        unsigned long last_map_addr = end;
        int i;

        pte_t *pte = pte_page + pte_index(addr);

        for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) {

                if (addr >= end) {
                        if (!after_bootmem) {
                                for (; i < PTRS_PER_PTE; i++, pte++)
                                        set_pte(pte, __pte(0));
                        }
                        break;
                }

                /*
                 * We will re-use the existing mapping. Xen for example
                 * has some special requirements, like mapping pagetable
                 * pages as RO. So assume that whoever pre-set up these
                 * mappings knew what they were doing.
                 */
                if (pte_val(*pte))
                        continue;

                if (0)
                        printk("   pte=%p addr=%lx pte=%016lx\n",
                               pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
                pages++;
                set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
                last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
        }

        update_page_count(PG_LEVEL_4K, pages);

        return last_map_addr;
}

static unsigned long __meminit
phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end,
                pgprot_t prot)
{
        pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);

        return phys_pte_init(pte, address, end, prot);
}

static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
              unsigned long page_size_mask, pgprot_t prot)
{
        unsigned long pages = 0;
        unsigned long last_map_addr = end;

        int i = pmd_index(address);

        for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
                unsigned long pte_phys;
                pmd_t *pmd = pmd_page + pmd_index(address);
                pte_t *pte;
                pgprot_t new_prot = prot;

                if (address >= end) {
                        if (!after_bootmem) {
                                for (; i < PTRS_PER_PMD; i++, pmd++)
                                        set_pmd(pmd, __pmd(0));
                        }
                        break;
                }

                if (pmd_val(*pmd)) {
                        if (!pmd_large(*pmd)) {
                                spin_lock(&init_mm.page_table_lock);
                                last_map_addr = phys_pte_update(pmd, address,
                                                                end, prot);
                                spin_unlock(&init_mm.page_table_lock);
                                continue;
                        }
                        /*
                         * If we are ok with a PG_LEVEL_2M mapping, then we
                         * will use the existing mapping.
                         *
                         * Otherwise, we will split the large page mapping but
                         * use the same existing protection bits except for
                         * large page, so that we don't violate Intel's TLB
                         * Application note (317080) which says, while changing
                         * the page sizes, new and old translations should
                         * not differ with respect to page frame and
                         * attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_2M))
                                continue;
                        new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
                }

                if (page_size_mask & (1<<PG_LEVEL_2M)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pmd,
                                pfn_pte(address >> PAGE_SHIFT,
                                        __pgprot(pgprot_val(prot) | _PAGE_PSE)));
                        spin_unlock(&init_mm.page_table_lock);
                        last_map_addr = (address & PMD_MASK) + PMD_SIZE;
                        continue;
                }

                pte = alloc_low_page(&pte_phys);
                last_map_addr = phys_pte_init(pte, address, end, new_prot);
                unmap_low_page(pte);

                spin_lock(&init_mm.page_table_lock);
                pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
                spin_unlock(&init_mm.page_table_lock);
        }
        update_page_count(PG_LEVEL_2M, pages);
        return last_map_addr;
}

static unsigned long __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
                unsigned long page_size_mask, pgprot_t prot)
{
        pmd_t *pmd = pmd_offset(pud, 0);
        unsigned long last_map_addr;

        last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask, prot);
        __flush_tlb_all();
        return last_map_addr;
}

static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
              unsigned long page_size_mask)
{
        unsigned long pages = 0;
        unsigned long last_map_addr = end;
        int i = pud_index(addr);

        for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
                unsigned long pmd_phys;
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;
                pgprot_t prot = PAGE_KERNEL;

                if (addr >= end)
                        break;

                if (!after_bootmem &&
                                !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
                        set_pud(pud, __pud(0));
                        continue;
                }

                if (pud_val(*pud)) {
                        if (!pud_large(*pud)) {
                                last_map_addr = phys_pmd_update(pud, addr, end,
                                                         page_size_mask, prot);
                                continue;
                        }
                        /*
                         * If we are ok with a PG_LEVEL_1G mapping, then we
                         * will use the existing mapping.
                         *
                         * Otherwise, we will split the gbpage mapping but use
                         * the same existing protection bits except for large
                         * page, so that we don't violate Intel's TLB
                         * Application note (317080) which says, while changing
                         * the page sizes, new and old translations should
                         * not differ with respect to page frame and
                         * attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_1G))
                                continue;
                        prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
                }

                if (page_size_mask & (1<<PG_LEVEL_1G)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pud,
                                pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
                        spin_unlock(&init_mm.page_table_lock);
                        last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
                        continue;
                }

                pmd = alloc_low_page(&pmd_phys);
                last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
                                              prot);
                unmap_low_page(pmd);

                spin_lock(&init_mm.page_table_lock);
                pud_populate(&init_mm, pud, __va(pmd_phys));
                spin_unlock(&init_mm.page_table_lock);
        }
        __flush_tlb_all();

        update_page_count(PG_LEVEL_1G, pages);

        return last_map_addr;
}

static unsigned long __meminit
phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
                unsigned long page_size_mask)
{
        pud_t *pud;

        pud = (pud_t *)pgd_page_vaddr(*pgd);

        return phys_pud_init(pud, addr, end, page_size_mask);
}

static void __init find_early_table_space(unsigned long end, int use_pse,
                                          int use_gbpages)
{
        unsigned long puds, pmds, ptes, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
        if (use_gbpages) {
                unsigned long extra;
                extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
                pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
        } else
                pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

        if (use_pse) {
                unsigned long extra;
                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        } else
                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
        tables += round_up(ptes * sizeof(pte_t), PAGE_SIZE);

        /*
         * RED-PEN putting page tables only on node 0 could
         * cause a hotspot and fill up ZONE_DMA. The page tables
         * need roughly 0.5KB per GB.
         */
        start = 0x8000;
        table_start = find_e820_area(start, end, tables, PAGE_SIZE);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;
        table_top = table_start + (tables >> PAGE_SHIFT);

        printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
                end, table_start << PAGE_SHIFT, table_top << PAGE_SHIFT);
}

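/*
 * Worked example (hypothetical, with use_pse == use_gbpages == 0):
 * for end = 4GB we need 4 pud entries, 2048 pmd entries and 1M pte
 * entries; at 8 bytes each, rounded up to whole pages, that is
 * 4KB + 16KB + 8MB of page-table space. With use_pse set, pte pages
 * are only needed for the unaligned tail, so the total collapses to
 * a few pages.
 */
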
static void __init init_gbpages(void)
{
        if (direct_gbpages && cpu_has_gbpages)
                printk(KERN_INFO "Using GB pages for direct mapping\n");
        else
                direct_gbpages = 0;
}

static unsigned long __init kernel_physical_mapping_init(unsigned long start,
                                                         unsigned long end,
                                                         unsigned long page_size_mask)
{

        unsigned long next, last_map_addr = end;

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        for (; start < end; start = next) {
                pgd_t *pgd = pgd_offset_k(start);
                unsigned long pud_phys;
                pud_t *pud;

                next = (start + PGDIR_SIZE) & PGDIR_MASK;
                if (next > end)
                        next = end;

                if (pgd_val(*pgd)) {
                        last_map_addr = phys_pud_update(pgd, __pa(start),
                                                 __pa(end), page_size_mask);
                        continue;
                }

                pud = alloc_low_page(&pud_phys);
                last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
                                                 page_size_mask);
                unmap_low_page(pud);

                spin_lock(&init_mm.page_table_lock);
                pgd_populate(&init_mm, pgd, __va(pud_phys));
                spin_unlock(&init_mm.page_table_lock);
        }
        __flush_tlb_all();

        return last_map_addr;
}

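/*
 * Shape of the walk above: kernel_physical_mapping_init() iterates pgd
 * entries, phys_pud_init() fills pud entries (as 1GB pages when
 * PG_LEVEL_1G is allowed), phys_pmd_init() fills pmd entries (as 2MB
 * pages when PG_LEVEL_2M is allowed), and phys_pte_init() maps whatever
 * remains with 4KB ptes.
 */
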
struct map_range {
        unsigned long start;
        unsigned long end;
        unsigned page_size_mask;
};

#define NR_RANGE_MR 5

static int save_mr(struct map_range *mr, int nr_range,
                   unsigned long start_pfn, unsigned long end_pfn,
                   unsigned long page_size_mask)
{

        if (start_pfn < end_pfn) {
                if (nr_range >= NR_RANGE_MR)
                        panic("run out of range for init_memory_mapping\n");
                mr[nr_range].start = start_pfn<<PAGE_SHIFT;
                mr[nr_range].end = end_pfn<<PAGE_SHIFT;
                mr[nr_range].page_size_mask = page_size_mask;
                nr_range++;
        }

        return nr_range;
}

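/*
 * Example (hypothetical pfns): save_mr(mr, 0, 0x100, 0x800, 1<<PG_LEVEL_2M)
 * records the physical range [1MB, 8MB) as eligible for 2MB pages and
 * returns 1, the new range count.
 */
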
/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
                                               unsigned long end)
{
        unsigned long last_map_addr = 0;
        unsigned long page_size_mask = 0;
        unsigned long start_pfn, end_pfn;

        struct map_range mr[NR_RANGE_MR];
        int nr_range, i;
        int use_pse, use_gbpages;

        printk(KERN_INFO "init_memory_mapping\n");

        /*
         * Find space for the kernel direct mapping tables.
         *
         * Later we should allocate these tables in the local node of the
         * memory mapped. Unfortunately this is done currently before the
         * nodes are discovered.
         */
        if (!after_bootmem)
                init_gbpages();

#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
         * This will simplify cpa(), which otherwise needs to support splitting
         * large pages into small in interrupt context, etc.
         */
        use_pse = use_gbpages = 0;
#else
        use_pse = cpu_has_pse;
        use_gbpages = direct_gbpages;
#endif

        if (use_gbpages)
                page_size_mask |= 1 << PG_LEVEL_1G;
        if (use_pse)
                page_size_mask |= 1 << PG_LEVEL_2M;

        memset(mr, 0, sizeof(mr));
        nr_range = 0;

        /* head, if start is not big-page aligned */
        start_pfn = start >> PAGE_SHIFT;
        end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
                        << (PMD_SHIFT - PAGE_SHIFT);
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

        /* big page (2M) range */
        start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
                         << (PMD_SHIFT - PAGE_SHIFT);
        end_pfn = ((start + (PUD_SIZE - 1))>>PUD_SHIFT)
                         << (PUD_SHIFT - PAGE_SHIFT);
        if (end_pfn > ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT)))
                end_pfn = ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT));
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                        page_size_mask & (1<<PG_LEVEL_2M));

        /* big page (1G) range */
        start_pfn = end_pfn;
        end_pfn = (end>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                        page_size_mask &
                         ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));

        /* tail that is not 1GB-aligned */
        start_pfn = end_pfn;
        end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                         page_size_mask & (1<<PG_LEVEL_2M));

        /* tail that is not 2MB-aligned */
        start_pfn = end_pfn;
        end_pfn = end>>PAGE_SHIFT;
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

        /* try to merge adjacent ranges with the same page size */
        for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
                unsigned long old_start;
                if (mr[i].end != mr[i+1].start ||
                    mr[i].page_size_mask != mr[i+1].page_size_mask)
                        continue;
                /* move it */
                old_start = mr[i].start;
                memmove(&mr[i], &mr[i+1],
                        (nr_range - 1 - i) * sizeof(struct map_range));
                mr[i].start = old_start;
                nr_range--;
        }

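        /*
         * Illustrative result (hypothetical): mapping 0 - 0x7ff00000 with
         * PSE but without gbpages splits, and then merges, into
         *
         *       0000000000 - 007fe00000 page 2M
         *       007fe00000 - 007ff00000 page 4k
         *
         * which is what the printk loop below reports.
         */
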
        for (i = 0; i < nr_range; i++)
                printk(KERN_DEBUG " %010lx - %010lx page %s\n",
                                mr[i].start, mr[i].end,
                        (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
                         (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));

        if (!after_bootmem)
                find_early_table_space(end, use_pse, use_gbpages);

        for (i = 0; i < nr_range; i++)
                last_map_addr = kernel_physical_mapping_init(
                                        mr[i].start, mr[i].end,
                                        mr[i].page_size_mask);

        if (!after_bootmem)
                mmu_cr4_features = read_cr4();
        __flush_tlb_all();

        if (!after_bootmem && table_end > table_start)
                reserve_early(table_start << PAGE_SHIFT,
                                 table_end << PAGE_SHIFT, "PGTABLE");

        printk(KERN_INFO "last_map_addr: %lx end: %lx\n",
                         last_map_addr, end);

        if (!after_bootmem)
                early_memtest(start, end);

        return last_map_addr >> PAGE_SHIFT;
}

#ifndef CONFIG_NUMA
void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long bootmap_size, bootmap;

        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size,
                                 PAGE_SIZE);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        /* don't touch min_low_pfn */
        bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
                                         0, end_pfn);
        e820_register_active_regions(0, start_pfn, end_pfn);
        free_bootmem_with_active_regions(0, end_pfn);
        early_res_to_bootmem(0, end_pfn<<PAGE_SHIFT);
        reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
}

void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
        max_zone_pfns[ZONE_NORMAL] = max_pfn;

        memory_present(0, 0, max_pfn);
        sparse_init();
        free_area_init_nodes(max_zone_pfns);
}
#endif

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is always added to the NORMAL zone. This means you will never
 * get additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
        unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        last_mapped_pfn = init_memory_mapping(start, start + size-1);
        if (last_mapped_pfn > max_pfn_mapped)
                max_pfn_mapped = last_mapped_pfn;

        ret = __add_pages(zone, start_pfn, nr_pages);
        WARN_ON(1);

        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
        return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X, DOSEMU and similar
 * apps. Access has to be given to non-kernel-RAM areas as well; these
 * contain the PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
        if (pagenr <= 256)
                return 1;
        if (!page_is_ram(pagenr))
                return 1;
        return 0;
}

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
                         kcore_modules, kcore_vsyscall;

void __init mem_init(void)
{
        long codesize, reservedpages, datasize, initsize;

        pci_iommu_alloc();

        /* clear_bss() already cleared the empty_zero_page */

        reservedpages = 0;

        /* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
        totalram_pages = numa_free_all_bootmem();
#else
        totalram_pages = free_all_bootmem();
#endif
        reservedpages = max_pfn - totalram_pages -
                                        absent_pages_in_range(0, max_pfn);
        after_bootmem = 1;

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);
        kclist_add(&kcore_kernel, &_stext, _end - _stext);
        kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                   VSYSCALL_END - VSYSCALL_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
                         "%ldk reserved, %ldk data, %ldk init)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                max_pfn << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10);
}

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr = begin;

        if (addr >= end)
                return;

        /*
         * If debugging page accesses then do not free this memory but
         * mark them not present - any buggy init-section access will
         * create a kernel page fault:
         */
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
                begin, PAGE_ALIGN(end));
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

        for (; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)(addr & ~(PAGE_SIZE-1)),
                        POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
#endif
}

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);
        unsigned long rodata_start =
                ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;

#ifdef CONFIG_DYNAMIC_FTRACE
        /* Dynamic tracing modifies the kernel text section */
        start = rodata_start;
#endif

        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);

        /*
         * The rodata section (but not the kernel text!) should also be
         * not-executable.
         */
        set_memory_nx(rodata_start, (end - rodata_start) >> PAGE_SHIFT);

        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
        set_memory_rw(start, (end-start) >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: again\n");
        set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif
}

#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif

int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
{
#ifdef CONFIG_NUMA
        int nid, next_nid;
        int ret;
#endif
        unsigned long pfn = phys >> PAGE_SHIFT;

        if (pfn >= max_pfn) {
                /*
                 * This can happen with kdump kernels when accessing
                 * firmware tables:
                 */
                if (pfn < max_pfn_mapped)
                        return -EFAULT;

                printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %lu\n",
                                phys, len);
                return -EFAULT;
        }

        /* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
        nid = phys_to_nid(phys);
        next_nid = phys_to_nid(phys + len - 1);
        if (nid == next_nid)
                ret = reserve_bootmem_node(NODE_DATA(nid), phys, len, flags);
        else
                ret = reserve_bootmem(phys, len, flags);

        if (ret != 0)
                return ret;

#else
        reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
#endif

        if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
                dma_reserve += len / PAGE_SIZE;
                set_dma_reserve(dma_reserve);
        }

        return 0;
}

int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}

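/*
 * Note on the canonical-address test above: the arithmetic right shift
 * sign-extends the top bits, so a canonical kernel address yields
 * above == -1UL and a canonical user address yields above == 0; any
 * other value means the address is non-canonical and is rejected
 * before the page tables are walked.
 */
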
/*
 * A pseudo VMA to allow ptrace access for the vsyscall page. This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static struct vm_area_struct gate_vma = {
        .vm_start       = VSYSCALL_START,
        .vm_end         = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
        .vm_page_prot   = PAGE_READONLY_EXEC,
        .vm_flags       = VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
        if (test_tsk_thread_flag(tsk, TIF_IA32))
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(task);

        if (!vma)
                return 0;

        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives:
 */
int in_gate_area_no_task(unsigned long addr)
{
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                return "[vdso]";
        if (vma == &gate_vma)
                return "[vsyscall]";
        return NULL;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

int __meminit
vmemmap_populate(struct page *start_page, unsigned long size, int node)
{
        unsigned long addr = (unsigned long)start_page;
        unsigned long end = (unsigned long)(start_page + size);
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        for (; addr < end; addr = next) {
                void *p = NULL;

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                if (!cpu_has_pse) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        pmd = vmemmap_pmd_populate(pud, addr, node);

                        if (!pmd)
                                return -ENOMEM;

                        p = vmemmap_pte_populate(pmd, addr, node);

                        if (!p)
                                return -ENOMEM;

                        addr_end = addr + PAGE_SIZE;
                        p_end = p + PAGE_SIZE;
                } else {
                        next = pmd_addr_end(addr, end);

                        pmd = pmd_offset(pud, addr);
                        if (pmd_none(*pmd)) {
                                pte_t entry;

                                p = vmemmap_alloc_block(PMD_SIZE, node);
                                if (!p)
                                        return -ENOMEM;

                                entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
                                                PAGE_KERNEL_LARGE);
                                set_pmd(pmd, __pmd(pte_val(entry)));

                                /* check to see if we have contiguous blocks */
                                if (p_end != p || node_start != node) {
                                        if (p_start)
                                                printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                                                       addr_start, addr_end-1, p_start, p_end-1, node_start);
                                        addr_start = addr;
                                        node_start = node;
                                        p_start = p;
                                }

                                addr_end = addr + PMD_SIZE;
                                p_end = p + PMD_SIZE;
                        } else
                                vmemmap_verify((pte_t *)pmd, node, addr, next);
                }

        }
        return 0;
}

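/*
 * Rough sizing for the PSE path above (assuming a 56-byte struct page):
 * one 2MB vmemmap block holds about 37k struct page entries, i.e. it
 * describes roughly 146MB of memory, so populating the vmemmap costs
 * on the order of 1.4% of the memory it covers.
 */
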
void __meminit vmemmap_populate_print_last(void)
{
        if (p_start) {
                printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                        addr_start, addr_end-1, p_start, p_end-1, node_start);
                p_start = NULL;
                p_end = NULL;
                node_start = 0;
        }
}
#endif