/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical memory, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

void show_mem(void)
{
        long i, total = 0, reserved = 0;
        long shared = 0, cached = 0;
        struct page *page;
        pg_data_t *pgdat;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap: %6ldkB\n",
                nr_swap_pages << (PAGE_SHIFT-10));

        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        /*
                         * This loop can take a while with 256 GB and
                         * 4k pages so defer the NMI watchdog:
                         */
                        if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
                                touch_nmi_watchdog();

                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;

                        page = pfn_to_page(pgdat->node_start_pfn + i);
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
        }
        printk(KERN_INFO "%lu pages of RAM\n", total);
        printk(KERN_INFO "%lu reserved pages\n", reserved);
        printk(KERN_INFO "%lu pages shared\n", shared);
        printk(KERN_INFO "%lu pages swap cached\n", cached);
}

int after_bootmem;

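/*
 * spp_getpage() hands back one zeroed page for building kernel page
 * tables: from the bootmem allocator while we are still booting, or from
 * get_zeroed_page() once the page allocator is up (after_bootmem).
 */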
static __init void *spp_getpage(void)
{
        void *ptr;

        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);

        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
                panic("set_pte_phys: cannot allocate page data %s\n",
                        after_bootmem ? "after bootmem" : "");
        }

        pr_debug("spp_getpage %p\n", ptr);

        return ptr;
}

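/*
 * Install a single kernel mapping of vaddr -> phys with protection prot,
 * allocating intermediate pmd/pte pages on demand.  The pgd entry covering
 * vaddr must already exist (for the fixmap it is set up in head.S).
 */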
static __init void
set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, new_pte;

        pr_debug("set_pte_phys %lx to %lx\n", vaddr, phys);

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_ERR
                        "PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                pmd = (pmd_t *) spp_getpage();
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
                if (pmd != pmd_offset(pud, 0)) {
                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
                                pmd, pmd_offset(pud, 0));
                        return;
                }
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                pte = (pte_t *) spp_getpage();
                set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
                if (pte != pte_offset_kernel(pmd, 0)) {
                        printk(KERN_ERR "PAGETABLE BUG #02!\n");
                        return;
                }
        }
        new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

        pte = pte_offset_kernel(pmd, vaddr);
        if (!pte_none(*pte) &&
            pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
                pte_ERROR(*pte);
        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                printk(KERN_ERR "Invalid __set_fixmap\n");
                return;
        }
        set_pte_phys(address, phys, prot);
}

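/*
 * Physical pfn range [table_start, table_end) holding the early
 * direct-mapping page tables; table_end is advanced by alloc_low_page()
 * as pages are handed out.
 */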
static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;

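/*
 * Return one zeroed page for an early page table.  Before bootmem is
 * available the page comes from the window reserved by
 * find_early_table_space() and is temporarily mapped with early_ioremap();
 * later we can simply use the page allocator.
 */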
static __meminit void *alloc_low_page(unsigned long *phys)
{
        unsigned long pfn = table_end++;
        void *adr;

        if (after_bootmem) {
                adr = (void *)get_zeroed_page(GFP_ATOMIC);
                *phys = __pa(adr);

                return adr;
        }

        if (pfn >= end_pfn)
                panic("alloc_low_page: ran out of memory");

        adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
        *phys = pfn * PAGE_SIZE;
        return adr;
}

static __meminit void unmap_low_page(void *adr)
{
        if (after_bootmem)
                return;

        early_iounmap(adr, PAGE_SIZE);
}

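/*
 * early_ioremap()/early_iounmap() below borrow unused 2MB entries in
 * level2_kernel_pgt to map arbitrary physical ranges while the normal
 * ioremap machinery is not available yet.
 */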
/* Must run before zap_low_mappings */
__meminit void *early_ioremap(unsigned long addr, unsigned long size)
{
        pmd_t *pmd, *last_pmd;
        unsigned long vaddr;
        int i, pmds;

        pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
        vaddr = __START_KERNEL_map;
        pmd = level2_kernel_pgt;
        last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;

        for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
                for (i = 0; i < pmds; i++) {
                        if (pmd_present(pmd[i]))
                                goto continue_outer_loop;
                }
                vaddr += addr & ~PMD_MASK;
                addr &= PMD_MASK;

                for (i = 0; i < pmds; i++, addr += PMD_SIZE)
                        set_pmd(pmd+i, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
                __flush_tlb_all();

                return (void *)vaddr;
continue_outer_loop:
                ;
        }
        printk(KERN_ERR "early_ioremap(0x%lx, %lu) failed\n", addr, size);

        return NULL;
}

/*
 * To avoid virtual aliases later:
 */
__meminit void early_iounmap(void *addr, unsigned long size)
{
        unsigned long vaddr;
        pmd_t *pmd;
        int i, pmds;

        vaddr = (unsigned long)addr;
        pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
        pmd = level2_kernel_pgt + pmd_index(vaddr);

        for (i = 0; i < pmds; i++)
                pmd_clear(pmd + i);

        __flush_tlb_all();
}

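/*
 * Fill one page worth of PMD entries with 2MB kernel mappings from
 * address up to end.  Already-populated entries are left untouched so a
 * later call (e.g. for memory hotplug) only extends the mapping.
 */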
static void __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
        int i = pmd_index(address);

        for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
                pmd_t *pmd = pmd_page + pmd_index(address);

                if (address >= end) {
                        if (!after_bootmem) {
                                for (; i < PTRS_PER_PMD; i++, pmd++)
                                        set_pmd(pmd, __pmd(0));
                        }
                        break;
                }

                if (pmd_val(*pmd))
                        continue;

                set_pte((pte_t *)pmd,
                        pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
        }
}

static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
        pmd_t *pmd = pmd_offset(pud, 0);

        spin_lock(&init_mm.page_table_lock);
        phys_pmd_init(pmd, address, end);
        spin_unlock(&init_mm.page_table_lock);
        __flush_tlb_all();
}

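/*
 * Populate one page of PUD entries (1GB each) for [addr, end).  Ranges
 * with no RAM in the e820 map are cleared at boot, existing entries are
 * extended via phys_pmd_update(), and new entries get a freshly
 * allocated PMD page.
 */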
static void __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
        int i = pud_index(addr);

        for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
                unsigned long pmd_phys;
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;

                if (addr >= end)
                        break;

                if (!after_bootmem &&
                    !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
                        set_pud(pud, __pud(0));
                        continue;
                }

                if (pud_val(*pud)) {
                        phys_pmd_update(pud, addr, end);
                        continue;
                }

                pmd = alloc_low_page(&pmd_phys);

                spin_lock(&init_mm.page_table_lock);
                set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
                phys_pmd_init(pmd, addr, end);
                spin_unlock(&init_mm.page_table_lock);

                unmap_low_page(pmd);
        }
        __flush_tlb_all();
}

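/*
 * Worst-case estimate of the pud/pmd pages needed to direct-map memory up
 * to 'end', reserved as one physically contiguous block searched for in
 * the e820 map starting at 0x8000.
 */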
static void __init find_early_table_space(unsigned long end)
{
        unsigned long puds, pmds, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
                 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

        /*
         * RED-PEN putting page tables only on node 0 could
         * cause a hotspot and fill up ZONE_DMA. The page tables
         * need roughly 0.5KB per GB.
         */
        start = 0x8000;
        table_start = find_e820_area(start, end, tables, PAGE_SIZE);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;

        early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
                end, table_start << PAGE_SHIFT,
                (table_start << PAGE_SHIFT) + tables);
}

/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
{
        unsigned long next;

        pr_debug("init_memory_mapping\n");

        /*
         * Find space for the kernel direct mapping tables.
         *
         * Later we should allocate these tables in the local node of the
         * memory mapped. Unfortunately this is done currently before the
         * nodes are discovered.
         */
        if (!after_bootmem)
                find_early_table_space(end);

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        for (; start < end; start = next) {
                pgd_t *pgd = pgd_offset_k(start);
                unsigned long pud_phys;
                pud_t *pud;

                if (after_bootmem)
                        pud = pud_offset(pgd, start & PGDIR_MASK);
                else
                        pud = alloc_low_page(&pud_phys);

                next = start + PGDIR_SIZE;
                if (next > end)
                        next = end;
                phys_pud_init(pud, __pa(start), __pa(next));
                if (!after_bootmem)
                        set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
                unmap_low_page(pud);
        }

        if (!after_bootmem)
                mmu_cr4_features = read_cr4();
        __flush_tlb_all();

        if (!after_bootmem)
                reserve_early(table_start << PAGE_SHIFT,
                              table_end << PAGE_SHIFT, "PGTABLE");
}

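/*
 * On !NUMA kernels the zone layout is set up here; with CONFIG_NUMA the
 * NUMA code provides its own paging_init().
 */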
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
        max_zone_pfns[ZONE_NORMAL] = end_pfn;

        memory_present(0, 0, end_pfn);
        sparse_init();
        free_area_init_nodes(max_zone_pfns);
}
#endif

/*
 * Memory hotplug specific functions
 */
void online_page(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalram_pages++;
        num_physpages++;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is always added to the NORMAL zone. This means you will never
 * get additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        init_memory_mapping(start, start + size-1);

        ret = __add_pages(zone, start_pfn, nr_pages);
        WARN_ON(ret);

        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
        return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

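/*
 * /proc/kcore entries covering the direct mapping, the vmalloc area, the
 * kernel text, the modules area and the vsyscall page:
 */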
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
                         kcore_modules, kcore_vsyscall;

void __init mem_init(void)
{
        long codesize, reservedpages, datasize, initsize;

        pci_iommu_alloc();

        /* clear_bss() has already cleared the empty_zero_page */

        /* temporary debugging - double check it's true: */
        {
                int i;

                for (i = 0; i < 1024; i++)
                        WARN_ON_ONCE(empty_zero_page[i]);
        }

        reservedpages = 0;

        /* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
        totalram_pages = numa_free_all_bootmem();
#else
        totalram_pages = free_all_bootmem();
#endif
        reservedpages = end_pfn - totalram_pages -
                                absent_pages_in_range(0, end_pfn);
        after_bootmem = 1;

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);
        kclist_add(&kcore_kernel, &_stext, _end - _stext);
        kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                   VSYSCALL_END - VSYSCALL_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
                         "%ldk reserved, %ldk data, %ldk init)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                end_pfn << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10);

        cpa_init();
}

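/*
 * Free the pages between begin and end, poisoning their contents first so
 * late users of freed init memory show up as recognisable garbage.  With
 * CONFIG_DEBUG_PAGEALLOC the pages are instead left allocated but unmapped,
 * so any buggy access faults immediately.
 */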
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr = begin;

        if (addr >= end)
                return;

        /*
         * If debugging page accesses then do not free this memory but
         * mark them not present - any buggy init-section access will
         * create a kernel page fault:
         */
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
                begin, PAGE_ALIGN(end));
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

        for (; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)(addr & ~(PAGE_SIZE-1)),
                        POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
#endif
}

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

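/*
 * With CONFIG_DEBUG_RODATA the kernel's read-only data (and, when SMP
 * alternatives and kprobes no longer need to patch it, the text as well)
 * is write-protected once init is done; rodata_test() verifies that the
 * protection actually took effect.
 */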
#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
        unsigned long start = (unsigned long)_stext, end;

#ifdef CONFIG_HOTPLUG_CPU
        /* It must still be possible to apply SMP alternatives. */
        if (num_possible_cpus() > 1)
                start = (unsigned long)_etext;
#endif

#ifdef CONFIG_KPROBES
        start = (unsigned long)__start_rodata;
#endif

        end = (unsigned long)__end_rodata;
        start = (start + PAGE_SIZE - 1) & PAGE_MASK;
        end &= PAGE_MASK;
        if (end <= start)
                return;

        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);

        /*
         * The rodata section (but not the kernel text!) should also be
         * not-executable.
         */
        start = ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
        set_memory_nx(start, (end - start) >> PAGE_SHIFT);

        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
        set_memory_rw(start, (end-start) >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: again\n");
        set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif

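/*
 * Reserve a physical range with the bootmem allocator, on the right node
 * when NUMA is enabled.  Out-of-range requests (seen with kdump kernels
 * reading firmware tables) are ignored, and reservations inside the DMA
 * zone are also accounted in dma_reserve.
 */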
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
#ifdef CONFIG_NUMA
        int nid = phys_to_nid(phys);
#endif
        unsigned long pfn = phys >> PAGE_SHIFT;

        if (pfn >= end_pfn) {
                /*
                 * This can happen with kdump kernels when accessing
                 * firmware tables:
                 */
                if (pfn < end_pfn_map)
                        return;

                printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
                                phys, len);
                return;
        }

        /* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
        reserve_bootmem_node(NODE_DATA(nid), phys, len, BOOTMEM_DEFAULT);
#else
        reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
#endif
        if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
                dma_reserve += len / PAGE_SIZE;
                set_dma_reserve(dma_reserve);
        }
}

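/*
 * kern_addr_valid() reports whether a kernel virtual address is backed by
 * a present mapping: the address must be canonical and every page-table
 * level must be populated, with 2MB mappings handled at the pmd level.
 */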
int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}

/*
 * A pseudo VMA to allow ptrace access for the vsyscall page. This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static struct vm_area_struct gate_vma = {
        .vm_start       = VSYSCALL_START,
        .vm_end         = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
        .vm_page_prot   = PAGE_READONLY_EXEC,
        .vm_flags       = VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
        if (test_tsk_thread_flag(tsk, TIF_IA32))
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(task);

        if (!vma)
                return 0;

        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives:
 */
int in_gate_area_no_task(unsigned long addr)
{
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                return "[vdso]";
        if (vma == &gate_vma)
                return "[vsyscall]";
        return NULL;
}

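/*
 * With SPARSEMEM_VMEMMAP the struct page array lives in a virtually
 * contiguous memmap; the populate hook below backs it with 2MB blocks
 * allocated on the requesting node, keeping pfn_to_page()/page_to_pfn()
 * simple pointer arithmetic.
 */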
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
int __meminit
vmemmap_populate(struct page *start_page, unsigned long size, int node)
{
        unsigned long addr = (unsigned long)start_page;
        unsigned long end = (unsigned long)(start_page + size);
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        for (; addr < end; addr = next) {
                next = pmd_addr_end(addr, end);

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        pte_t entry;
                        void *p;

                        p = vmemmap_alloc_block(PMD_SIZE, node);
                        if (!p)
                                return -ENOMEM;

                        entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
                                        PAGE_KERNEL_LARGE);
                        set_pmd(pmd, __pmd(pte_val(entry)));

                        printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
                                addr, addr + PMD_SIZE - 1, p, node);
                } else {
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
                }
        }
        return 0;
}
#endif