/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

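/*
 * Dump a per-node summary of memory state (free, reserved, shared and
 * swap-cached pages) to the console, e.g. when debugging OOM situations.
 */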
void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT-10));

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%ld pages of RAM\n", total);
	printk(KERN_INFO "%ld reserved pages\n", reserved);
	printk(KERN_INFO "%ld pages shared\n", shared);
	printk(KERN_INFO "%ld pages swap cached\n", cached);
}

int after_bootmem;

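/*
 * Allocate one zeroed page for a kernel page table: from the bootmem
 * allocator during early boot, from the normal page allocator once
 * after_bootmem is set.
 */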
5f44a669 | 94 | static __init void *spp_getpage(void) |
1da177e4 LT |
95 | { |
96 | void *ptr; | |
97 | if (after_bootmem) | |
98 | ptr = (void *) get_zeroed_page(GFP_ATOMIC); | |
99 | else | |
100 | ptr = alloc_bootmem_pages(PAGE_SIZE); | |
101 | if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) | |
102 | panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":""); | |
103 | ||
104 | Dprintk("spp_getpage %p\n", ptr); | |
105 | return ptr; | |
106 | } | |
107 | ||
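/*
 * Map a single kernel-space page at vaddr to the physical address phys,
 * allocating any missing intermediate levels (pud/pmd/pte) via
 * spp_getpage(). Only used for the fixmap, via __set_fixmap().
 */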
static __init void set_pte_phys(unsigned long vaddr,
				unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}

unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
	pmd_t *pmd;
	void *address;
	int allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};

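/*
 * Get a zeroed page for an early page table. Before bootmem is running,
 * the page is carved out at table_end and reached through one of the
 * temporary 2MB mappings (temp_boot_pmds); *index records which mapping
 * was used so unmap_low_page() can release it.
 */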
static __meminit void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);
		return adr;
	}

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	memset(adr, 0, PAGE_SIZE);
	*index = i;
	*phys = pfn * PAGE_SIZE;
	return adr;
}

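/* Release the temporary mapping picked by alloc_low_page(); a no-op once
   the page came from the normal allocator. */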
static __meminit void unmap_low_page(int i)
{
	struct temp_map *ti;

	if (after_bootmem)
		return;

	ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}

/* Must run before zap_low_mappings */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
	unsigned long map = round_down(addr, LARGE_PAGE_SIZE);

	/* actually usually some more */
	if (size >= LARGE_PAGE_SIZE) {
		printk("SMBIOS area too long %lu\n", size);
		return NULL;
	}
	set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	map += LARGE_PAGE_SIZE;
	set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	__flush_tlb();
	return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
}

/* To avoid virtual aliases later */
__init void early_iounmap(void *addr, unsigned long size)
{
	if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) !=
	    temp_mappings[0].address)
		printk("early_iounmap: bad address %p\n", addr);
	set_pmd(temp_mappings[0].pmd, __pmd(0));
	set_pmd(temp_mappings[1].pmd, __pmd(0));
	__flush_tlb();
}

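/* Fill one PMD page with 2MB (PSE) entries direct-mapping [address, end). */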
static void __meminit
phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
{
	int i;

	for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
		unsigned long entry;

		if (address >= end) {
			if (!after_bootmem)
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			break;
		}
		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}

static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));

	if (pmd_none(*pmd)) {
		spin_lock(&init_mm.page_table_lock);
		phys_pmd_init(pmd, address, end);
		spin_unlock(&init_mm.page_table_lock);
		__flush_tlb_all();
	}
}

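/*
 * Populate one PUD page for the direct mapping of [address, end),
 * allocating and filling a PMD page for each 1GB slot that the e820 map
 * says contains memory.
 */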
static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i = pud_index(address);

	pud = pud + i;

	if (after_bootmem && pud_val(*pud)) {
		phys_pmd_update(pud, address, end);
		return;
	}

	for (; i < PTRS_PER_PUD; pud++, i++) {
		int map;
		unsigned long paddr, pmd_phys;
		pmd_t *pmd;

		paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
		if (paddr >= end)
			break;

		if (!after_bootmem && !e820_any_mapped(paddr, paddr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, paddr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(map);
	}
	__flush_tlb();
}

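/*
 * Estimate the worst-case number of pud/pmd pages needed to direct-map
 * memory up to 'end' and reserve a physically contiguous block for them
 * in the e820 map.
 */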
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/* RED-PEN putting page tables only on node 0 could
	   cause a hotspot and fill up ZONE_DMA. The page tables
	   need roughly 0.5KB per GB. */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		     end, table_start << PAGE_SHIFT,
		     (table_start << PAGE_SHIFT) + tables);
}

/* Set up the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from
   the physical memory. To access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		int map;
		unsigned long pud_phys;
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&map, &pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(map);
	}

	if (!after_bootmem)
		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
}

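/*
 * Remove the transient identity mapping of low memory that the boot code
 * needs: the BP clears its pgd entry directly, APs just switch cr3 to
 * init_level4_pgt, where the low mappings are already gone.
 */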
void __cpuinit zap_low_mappings(int cpu)
{
	if (cpu == 0) {
		pgd_t *pgd = pgd_offset_k(0UL);
		pgd_clear(pgd);
	} else {
		/*
		 * For APs, zap the low identity mappings by changing the cr3
		 * to init_level4_pgt and doing local flush tlb all
		 */
		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
	}
	__flush_tlb_all();
}

/* Compute zone sizes for the DMA and DMA32 zones in a node. */
__init void
size_zones(unsigned long *z, unsigned long *h,
	   unsigned long start_pfn, unsigned long end_pfn)
{
	int i;
	unsigned long w;

	for (i = 0; i < MAX_NR_ZONES; i++)
		z[i] = 0;

	if (start_pfn < MAX_DMA_PFN)
		z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
	if (start_pfn < MAX_DMA32_PFN) {
		unsigned long dma32_pfn = MAX_DMA32_PFN;
		if (dma32_pfn > end_pfn)
			dma32_pfn = end_pfn;
		z[ZONE_DMA32] = dma32_pfn - start_pfn;
	}
	z[ZONE_NORMAL] = end_pfn - start_pfn;

	/* Remove lower zones from higher ones. */
	w = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (z[i])
			z[i] -= w;
		w += z[i];
	}

	/* Compute holes */
	w = start_pfn;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		unsigned long s = w;
		w += z[i];
		h[i] = e820_hole_size(s, w);
	}

	/* Add the space needed for mem_map to the holes too. */
	for (i = 0; i < MAX_NR_ZONES; i++)
		h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;

	/* The 16MB DMA zone has the kernel and other misc mappings.
	   Account them too */
	if (h[ZONE_DMA]) {
		h[ZONE_DMA] += dma_reserve;
		if (h[ZONE_DMA] >= z[ZONE_DMA]) {
			printk(KERN_WARNING
				"Kernel too large and filling up ZONE_DMA?\n");
			h[ZONE_DMA] = z[ZONE_DMA];
		}
	}
}

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];

	memory_present(0, 0, end_pfn);
	sparse_init();
	size_zones(zones, holes, 0, end_pfn);
	free_area_init_node(0, NODE_DATA(0), zones,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen
			   currently. */
			printk(KERN_ERR "clear_kernel_mapping: "
				"mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}

/*
 * Memory hotplug specific functions
 */

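/* Make one hot-added page available to the buddy allocator. */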
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * XXX: memory_add_physaddr_to_nid() is supposed to find the node id for a
 * physical address handed in via the sysfs probe interface. When ACPI
 * notifies a hot-add event it can determine the node id by searching the
 * DSDT, but the probe interface carries no node id, so return node 0 for
 * now.
 */
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif

/*
 * Memory is always added to the NORMAL zone. This means you will never
 * get additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + MAX_NR_ZONES - 2;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(zone, start_pfn, nr_pages);
	if (ret)
		goto error;

	init_memory_mapping(start, (start + size - 1));

	return ret;
error:
	printk("%s: Problem encountered in __add_pages!\n", __func__);
	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);

#else /* CONFIG_MEMORY_HOTPLUG */
/*
 * Memory hotadd without sparsemem. The mem_maps have been allocated in
 * advance; just online the pages.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
	int err = -EIO;
	unsigned long pfn;
	unsigned long total = 0, mem = 0;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (pfn_valid(pfn)) {
			online_page(pfn_to_page(pfn));
			err = 0;
			mem++;
		}
		total++;
	}
	if (!err) {
		z->spanned_pages += total;
		z->present_pages += mem;
		z->zone_pgdat->node_spanned_pages += total;
		z->zone_pgdat->node_present_pages += mem;
	}
	return err;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;

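/*
 * Final memory setup: hand all bootmem pages to the buddy allocator,
 * account reserved pages, register the /proc/kcore regions and print
 * the memory banner.
 */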
void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	num_physpages = end_pfn;
	high_memory = (void *) __va(end_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

#ifdef CONFIG_SMP
	/*
	 * Sync boot_level4_pgt mappings with the init_level4_pgt
	 * except for the low identity mappings which are already zapped
	 * in init_level4_pgt. This sync-up is essential for AP bringup.
	 */
	memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}

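/*
 * Free the pages in [begin, end) back to the allocator, filling them with
 * a poison pattern first so that late references are easier to spot.
 */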
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (begin >= end)
		return;

	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
		       POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
}

void free_initmem(void)
{
	memset(__initdata_begin, POISON_FREE_INITDATA,
	       __initdata_end - __initdata_begin);
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA

extern char __start_rodata, __end_rodata;

void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)&__start_rodata;

	for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

	printk("Write protecting the kernel read-only data: %luk\n",
	       (&__end_rodata - &__start_rodata) >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

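/*
 * Node-aware wrapper for reserve_bootmem(). Also tracks how much of the
 * 16MB DMA zone is taken by such reservations, via dma_reserve, so that
 * size_zones() can account for it.
 */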
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
		dma_reserve += len / PAGE_SIZE;
}

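/*
 * Check whether addr is a canonical address backed by a present kernel
 * mapping, walking the page tables by hand and handling 2MB large pages.
 * Used e.g. by /proc/kcore to validate addresses before reading them.
 */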
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
	{ 0, }
};

static ctl_table debug_root_table2[] = {
	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
	  .child = debug_table2 },
	{ 0 },
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2, 1);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_END,
	.vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);

	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}