/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>

/*
 * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
 * The direct mapping extends to max_pfn_mapped, so that we can directly access
 * apertures, ACPI and other tables without having to play with fixmaps.
 */
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

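/*
 * Use 1GB pages for the kernel direct mapping when the CPU supports
 * them; can be forced on/off with the "gbpages"/"nogbpages" boot
 * parameters below.
 */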
int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
				= 1
#endif
;

static int __init parse_direct_gbpages_off(char *arg)
{
	direct_gbpages = 0;
	return 0;
}
early_param("nogbpages", parse_direct_gbpages_off);

static int __init parse_direct_gbpages_on(char *arg)
{
	direct_gbpages = 1;
	return 0;
}
early_param("gbpages", parse_direct_gbpages_on);

/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and move
 * around without checking the pgd every time.
 */

int after_bootmem;

/*
 * NOTE: This function is marked __ref because it calls an __init function
 * (alloc_bootmem_pages). It's safe to do that ONLY when after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

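/*
 * Install one kernel pte for @vaddr inside the given pud page,
 * allocating any missing intermediate pmd/pte pages via spp_getpage().
 */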
void
set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = pud_page + pud_index(vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0)) {
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
				pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk(KERN_ERR "PAGETABLE BUG #02!\n");
			return;
		}
	}

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) && pte_val(new_pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

153 | ||
0814e0ba EH |
154 | void |
155 | set_pte_vaddr(unsigned long vaddr, pte_t pteval) | |
156 | { | |
157 | pgd_t *pgd; | |
158 | pud_t *pud_page; | |
159 | ||
160 | pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval)); | |
161 | ||
162 | pgd = pgd_offset_k(vaddr); | |
163 | if (pgd_none(*pgd)) { | |
164 | printk(KERN_ERR | |
165 | "PGD FIXMAP MISSING, it should be setup in head.S!\n"); | |
166 | return; | |
167 | } | |
168 | pud_page = (pud_t*)pgd_page_vaddr(*pgd); | |
169 | set_pte_vaddr_pud(pud_page, vaddr, pteval); | |
170 | } | |
171 | ||
/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
						pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			pud = (pud_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pud = pud_offset(pgd, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end - _text)
 *
 * phys_addr holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _end.  _end is
 * rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;
	pmd_t *last_pmd = pmd + PTRS_PER_PMD;

	for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}

static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;

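/*
 * Bootstrap allocator for the early page tables: before bootmem is
 * running, hand out pages from the [table_start, table_top) range
 * reserved by find_early_table_space(), temporarily mapped with
 * early_ioremap(); afterwards fall back to get_zeroed_page().
 */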
static __ref void *alloc_low_page(unsigned long *phys)
{
	unsigned long pfn = table_end++;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);

		return adr;
	}

	if (pfn >= table_top)
		panic("alloc_low_page: ran out of memory");

	adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
	memset(adr, 0, PAGE_SIZE);
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __ref void unmap_low_page(void *adr)
{
	if (after_bootmem)
		return;

	early_iounmap(adr, PAGE_SIZE);
}

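/*
 * Fill one pte page with 4k mappings for [addr, end), zeroing unused
 * tail entries at boot time.  Returns the highest address mapped.
 */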
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end)
{
	unsigned pages = 0;
	unsigned long last_map_addr = end;
	int i;

	pte_t *pte = pte_page + pte_index(addr);

	for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) {

		if (addr >= end) {
			if (!after_bootmem) {
				for (; i < PTRS_PER_PTE; i++, pte++)
					set_pte(pte, __pte(0));
			}
			break;
		}

		if (pte_val(*pte))
			continue;

		if (0)
			printk("   pte=%p addr=%lx pte=%016lx\n",
			       pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL));
		last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
		pages++;
	}
	update_page_count(PG_LEVEL_4K, pages);

	return last_map_addr;
}

static unsigned long __meminit
phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end)
{
	pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);

	return phys_pte_init(pte, address, end);
}

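/*
 * Set up the pmd level for [address, end): map with a 2MB page directly
 * when PG_LEVEL_2M is set in @page_size_mask, otherwise populate a pte
 * page.  Returns the highest address mapped.
 */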
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
	      unsigned long page_size_mask)
{
	unsigned long pages = 0;
	unsigned long last_map_addr = end;
	unsigned long start = address;

	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		unsigned long pte_phys;
		pmd_t *pmd = pmd_page + pmd_index(address);
		pte_t *pte;

		if (address >= end) {
			if (!after_bootmem) {
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			}
			break;
		}

		if (pmd_val(*pmd)) {
			if (!pmd_large(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				last_map_addr = phys_pte_update(pmd, address,
								end);
				spin_unlock(&init_mm.page_table_lock);
			}
			/* Count entries we're using from level2_ident_pgt */
			if (start == 0)
				pages++;
			continue;
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pmd,
				pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = (address & PMD_MASK) + PMD_SIZE;
			continue;
		}

		pte = alloc_low_page(&pte_phys);
		last_map_addr = phys_pte_init(pte, address, end);
		unmap_low_page(pte);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return last_map_addr;
}

static unsigned long __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
		unsigned long page_size_mask)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	unsigned long last_map_addr;

	last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
	__flush_tlb_all();
	return last_map_addr;
}

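/*
 * Set up the pud level for [addr, end): map with a 1GB page directly
 * when PG_LEVEL_1G is set in @page_size_mask, otherwise descend to the
 * pmd level.  Ranges with no e820 backing are cleared at boot time.
 */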
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
	      unsigned long page_size_mask)
{
	unsigned long pages = 0;
	unsigned long last_map_addr = end;
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		if (addr >= end)
			break;

		if (!after_bootmem &&
				!e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			if (!pud_large(*pud))
				last_map_addr = phys_pmd_update(pud, addr, end,
							 page_size_mask);
			continue;
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pud,
				pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
			continue;
		}

		pmd = alloc_low_page(&pmd_phys);
		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
		unmap_low_page(pmd);

		spin_lock(&init_mm.page_table_lock);
		pud_populate(&init_mm, pud, __va(pmd_phys));
		spin_unlock(&init_mm.page_table_lock);
	}
	__flush_tlb_all();
	update_page_count(PG_LEVEL_1G, pages);

	return last_map_addr;
}

static unsigned long __meminit
phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
		unsigned long page_size_mask)
{
	pud_t *pud;

	pud = (pud_t *)pgd_page_vaddr(*pgd);

	return phys_pud_init(pud, addr, end, page_size_mask);
}

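/*
 * Upper-bound the number of pud/pmd/pte pages needed to direct-map
 * [0, end) with the configured page sizes and reserve a contiguous
 * e820 area for them.
 */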
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, ptes, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
	if (direct_gbpages) {
		unsigned long extra;
		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
	} else
		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	if (cpu_has_pse) {
		unsigned long extra;
		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
	} else
		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	tables += round_up(ptes * sizeof(pte_t), PAGE_SIZE);

	/*
	 * RED-PEN putting page tables only on node 0 could
	 * cause a hotspot and fill up ZONE_DMA. The page tables
	 * need roughly 0.5KB per GB.
	 */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables, PAGE_SIZE);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;
	table_top = table_start + (tables >> PAGE_SHIFT);

	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT, table_top << PAGE_SHIFT);
}

static void __init init_gbpages(void)
{
	if (direct_gbpages && cpu_has_gbpages)
		printk(KERN_INFO "Using GB pages for direct mapping\n");
	else
		direct_gbpages = 0;
}

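/*
 * Walk the kernel-direct-mapping pgd entries covering the physical
 * range [start, end) and populate them one pgd at a time.
 */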
static unsigned long __init kernel_physical_mapping_init(unsigned long start,
						unsigned long end,
						unsigned long page_size_mask)
{
	unsigned long next, last_map_addr = end;

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		pgd_t *pgd = pgd_offset_k(start);
		unsigned long pud_phys;
		pud_t *pud;

		next = (start + PGDIR_SIZE) & PGDIR_MASK;
		if (next > end)
			next = end;

		if (pgd_val(*pgd)) {
			last_map_addr = phys_pud_update(pgd, __pa(start),
						 __pa(end), page_size_mask);
			continue;
		}

		pud = alloc_low_page(&pud_phys);
		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
						 page_size_mask);
		unmap_low_page(pud);

		spin_lock(&init_mm.page_table_lock);
		pgd_populate(&init_mm, pgd, __va(pud_phys));
		spin_unlock(&init_mm.page_table_lock);
	}

	return last_map_addr;
}

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

#define NR_RANGE_MR 5

static int save_mr(struct map_range *mr, int nr_range,
		   unsigned long start_pfn, unsigned long end_pfn,
		   unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}

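/*
 * The splitting below produces at most NR_RANGE_MR ranges: a 4k head up
 * to the first 2MB boundary, 2MB pages up to the first 1GB boundary,
 * 1GB pages in the middle, then 2MB and 4k tails; adjacent ranges that
 * end up with the same page size are merged afterwards.
 */
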
/*
 * Set up the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	unsigned long last_map_addr = 0;
	unsigned long page_size_mask = 0;
	unsigned long start_pfn, end_pfn;

	struct map_range mr[NR_RANGE_MR];
	int nr_range, i;

	printk(KERN_INFO "init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 *
	 * Later we should allocate these tables on the local node of the
	 * memory they map. Unfortunately this is currently done before
	 * the nodes are discovered.
	 */
	if (!after_bootmem)
		init_gbpages();

	if (direct_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	if (cpu_has_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;

	memset(mr, 0, sizeof(mr));
	nr_range = 0;

	/* head, if start is not 2MB-aligned */
	start_pfn = start >> PAGE_SHIFT;
	end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	/* big page (2M) range */
	start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
	end_pfn = ((start + (PUD_SIZE - 1))>>PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	if (end_pfn > ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT)))
		end_pfn = ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT));
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
			page_size_mask & (1<<PG_LEVEL_2M));

	/* big page (1G) range */
	start_pfn = end_pfn;
	end_pfn = (end>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
			page_size_mask &
			 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));

	/* tail that is not 1GB-aligned */
	start_pfn = end_pfn;
	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
			page_size_mask & (1<<PG_LEVEL_2M));

	/* tail that is not 2MB-aligned */
	start_pfn = end_pfn;
	end_pfn = end>>PAGE_SHIFT;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	/* try to merge contiguous ranges that use the same page size */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			 (nr_range - 1 - i) * sizeof (struct map_range));
		mr[i].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
				mr[i].start, mr[i].end,
			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));

	if (!after_bootmem)
		find_early_table_space(end);

	for (i = 0; i < nr_range; i++)
		last_map_addr = kernel_physical_mapping_init(
					mr[i].start, mr[i].end,
					mr[i].page_size_mask);

	if (!after_bootmem)
		mmu_cr4_features = read_cr4();
	__flush_tlb_all();

	if (!after_bootmem && table_end > table_start)
		reserve_early(table_start << PAGE_SHIFT,
				 table_end << PAGE_SHIFT, "PGTABLE");

	printk(KERN_INFO "last_map_addr: %lx end: %lx\n",
			 last_map_addr, end);

	if (!after_bootmem)
		early_memtest(start, end);

	return last_map_addr >> PAGE_SHIFT;
}

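/*
 * Flat (non-NUMA) setup: place the bootmem bitmap via the e820 map and
 * hand all usable RAM to the bootmem allocator.
 */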
#ifndef CONFIG_NUMA
void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size,
				 PAGE_SIZE);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	/* don't touch min_low_pfn */
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
					 0, end_pfn);
	e820_register_active_regions(0, start_pfn, end_pfn);
	free_bootmem_with_active_regions(0, end_pfn);
	early_res_to_bootmem(0, end_pfn<<PAGE_SHIFT);
	reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = max_pfn;

	memory_present(0, 0, max_pfn);
	sparse_init();
	free_area_init_nodes(max_zone_pfns);
}
#endif

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is always added to the NORMAL zone. This means you will never
 * get additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	last_mapped_pfn = init_memory_mapping(start, start + size-1);
	if (last_mapped_pfn > max_pfn_mapped)
		max_pfn_mapped = last_mapped_pfn;

	ret = __add_pages(zone, start_pfn, nr_pages);
	WARN_ON_ONCE(ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps. Access has to be given to non-kernel-ram areas as well; these
 * contain the PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)
		return 1;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
			 kcore_modules, kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* clear_bss() already cleared the empty_zero_page */

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = max_pfn - totalram_pages -
					absent_pages_in_range(0, max_pfn);
	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
			 "%ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		max_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

	cpa_init();
}

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = begin;

	if (addr >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
			POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);
	unsigned long rodata_start =
		((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;

#ifdef CONFIG_DYNAMIC_FTRACE
	/* Dynamic tracing modifies the kernel text section */
	start = rodata_start;
#endif

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	/*
	 * The rodata section (but not the kernel text!) should also be
	 * not-executable.
	 */
	set_memory_nx(rodata_start, (end - rodata_start) >> PAGE_SHIFT);

	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif
}

#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

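/*
 * Reserve a bootmem range, handling ranges that straddle two NUMA nodes
 * and accounting reservations below 16MB against the DMA zone.
 */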
int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
				   int flags)
{
#ifdef CONFIG_NUMA
	int nid, next_nid;
	int ret;
#endif
	unsigned long pfn = phys >> PAGE_SHIFT;

	if (pfn >= max_pfn) {
		/*
		 * This can happen with kdump kernels when accessing
		 * firmware tables:
		 */
		if (pfn < max_pfn_mapped)
			return -EFAULT;

		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %lu\n",
				phys, len);
		return -EFAULT;
	}

	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	nid = phys_to_nid(phys);
	next_nid = phys_to_nid(phys + len - 1);
	if (nid == next_nid)
		ret = reserve_bootmem_node(NODE_DATA(nid), phys, len, flags);
	else
		ret = reserve_bootmem(phys, len, flags);

	if (ret != 0)
		return ret;

#else
	reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
#endif

	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
		dma_reserve += len / PAGE_SIZE;
		set_dma_reserve(dma_reserve);
	}

	return 0;
}

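/*
 * Check that a kernel virtual address is canonical and actually mapped,
 * by walking the page tables down to the pte (or large pmd) level.
 */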
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

/*
 * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static struct vm_area_struct gate_vma = {
	.vm_start	= VSYSCALL_START,
	.vm_end		= VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
	.vm_page_prot	= PAGE_READONLY_EXEC,
	.vm_flags	= VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);

	if (!vma)
		return 0;

	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives:
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
		return "[vdso]";
	if (vma == &gate_vma)
		return "[vsyscall]";
	return NULL;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

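/*
 * The statics above track the current run of contiguous 2MB vmemmap
 * blocks so that one summary line can be printed per run; see
 * vmemmap_populate_print_last().
 */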
int __meminit
vmemmap_populate(struct page *start_page, unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	for (; addr < end; addr = next) {
		void *p = NULL;

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		if (!cpu_has_pse) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = vmemmap_pmd_populate(pud, addr, node);

			if (!pmd)
				return -ENOMEM;

			p = vmemmap_pte_populate(pmd, addr, node);

			if (!p)
				return -ENOMEM;

			addr_end = addr + PAGE_SIZE;
			p_end = p + PAGE_SIZE;
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd)) {
				pte_t entry;

				p = vmemmap_alloc_block(PMD_SIZE, node);
				if (!p)
					return -ENOMEM;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				set_pmd(pmd, __pmd(pte_val(entry)));

				/* check to see if we have contiguous blocks */
				if (p_end != p || node_start != node) {
					if (p_start)
						printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
						       addr_start, addr_end-1, p_start, p_end-1, node_start);
					addr_start = addr;
					node_start = node;
					p_start = p;
				}

				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;
			} else
				vmemmap_verify((pte_t *)pmd, node, addr, next);
		}
	}
	return 0;
}

void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
			addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}
#endif