/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

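/*
 * Dump a memory usage summary to the kernel log: free areas, free swap,
 * and per-node counts of total/reserved/shared/swap-cached pages.
 */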
void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	struct page *page;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n",
		nr_swap_pages << (PAGE_SHIFT-10));

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			/*
			 * This loop can take a while with 256 GB and
			 * 4k pages so defer the NMI watchdog:
			 */
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();

			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;

			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%ld pages of RAM\n", total);
	printk(KERN_INFO "%ld reserved pages\n", reserved);
	printk(KERN_INFO "%ld pages shared\n", shared);
	printk(KERN_INFO "%ld pages swap cached\n", cached);
}

int after_bootmem;

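/*
 * Allocate one zeroed page for a kernel page table: from the bootmem
 * allocator while booting, from the page allocator afterwards.
 * Panics on failure, since callers cannot recover.
 */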
static __init void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

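/*
 * Install a single kernel PTE for vaddr, allocating any missing
 * intermediate page-table levels with spp_getpage(). Used by
 * __set_fixmap() below to populate the fixmap range.
 */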
static __init void
set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	pr_debug("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
				pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk(KERN_ERR "PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk(KERN_ERR "Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}

static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;

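/*
 * Hand out zeroed pages for early page tables. Before bootmem is up,
 * pages come from the range reserved by find_early_table_space() and
 * are temporarily mapped with early_ioremap(); the physical address is
 * returned through *phys.
 */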
static __meminit void *alloc_low_page(unsigned long *phys)
{
	unsigned long pfn = table_end++;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);

		return adr;
	}

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");

	adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
	memset(adr, 0, PAGE_SIZE);
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __meminit void unmap_low_page(void *adr)
{
	if (after_bootmem)
		return;

	early_iounmap(adr, PAGE_SIZE);
}

/* Must run before zap_low_mappings */
__meminit void *early_ioremap(unsigned long addr, unsigned long size)
{
	pmd_t *pmd, *last_pmd;
	unsigned long vaddr;
	int i, pmds;

	pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
	vaddr = __START_KERNEL_map;
	pmd = level2_kernel_pgt;
	last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;

	for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
		for (i = 0; i < pmds; i++) {
			if (pmd_present(pmd[i]))
				goto continue_outer_loop;
		}
		vaddr += addr & ~PMD_MASK;
		addr &= PMD_MASK;

		for (i = 0; i < pmds; i++, addr += PMD_SIZE)
			set_pmd(pmd+i, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
		__flush_tlb_all();

		return (void *)vaddr;
continue_outer_loop:
		;
	}
	printk(KERN_ERR "early_ioremap(0x%lx, %lu) failed\n", addr, size);

	return NULL;
}

/*
 * To avoid virtual aliases later:
 */
__meminit void early_iounmap(void *addr, unsigned long size)
{
	unsigned long vaddr;
	pmd_t *pmd;
	int i, pmds;

	vaddr = (unsigned long)addr;
	pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
	pmd = level2_kernel_pgt + pmd_index(vaddr);

	for (i = 0; i < pmds; i++)
		pmd_clear(pmd + i);

	__flush_tlb_all();
}

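/*
 * Fill a PMD page with 2MB mappings for the physical range
 * [address, end). Entries that are already present are left alone;
 * phys_pmd_update() below is the locked variant used when the PMD
 * page already exists.
 */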
static void __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		unsigned long entry;
		pmd_t *pmd = pmd_page + pmd_index(address);

		if (address >= end) {
			if (!after_bootmem) {
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			}
			break;
		}

		if (pmd_val(*pmd))
			continue;

		entry = __PAGE_KERNEL_LARGE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}

static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, 0);

	spin_lock(&init_mm.page_table_lock);
	phys_pmd_init(pmd, address, end);
	spin_unlock(&init_mm.page_table_lock);
	__flush_tlb_all();
}

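/*
 * Fill a PUD page for the physical range [addr, end), allocating and
 * populating one PMD page per gigabyte. At boot time, ranges not
 * covered by the e820 map are left unmapped.
 */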
static void __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		if (addr >= end)
			break;

		if (!after_bootmem &&
				!e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			phys_pmd_update(pud, addr, end);
			continue;
		}

		pmd = alloc_low_page(&pmd_phys);

		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, addr, end);
		spin_unlock(&init_mm.page_table_lock);

		unmap_low_page(pmd);
	}
	__flush_tlb_all();
}

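/*
 * Estimate how much memory the direct-mapping page tables for memory up
 * to 'end' will need, and reserve a contiguous area for them from the
 * e820 map.
 */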
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/*
	 * RED-PEN putting page tables only on node 0 could
	 * cause a hotspot and fill up ZONE_DMA. The page tables
	 * need roughly 0.5KB per GB.
	 */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	/*
	 * With a lot of RAM (e.g. 256GB) the early tables will not fit
	 * into the 0x8000 range; find_e820_area() then finds an area
	 * after the kernel bss, but table_start is not page aligned,
	 * so round it up to avoid overlapping bss:
	 */
	table_start = round_up(table_start, PAGE_SIZE);
	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT,
		(table_start << PAGE_SHIFT) + tables);
}

/*
 * Set up the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	pr_debug("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 *
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		pgd_t *pgd = pgd_offset_k(start);
		unsigned long pud_phys;
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(pud);
	}

	if (!after_bootmem)
		mmu_cr4_features = read_cr4();
	__flush_tlb_all();

	reserve_early(table_start << PAGE_SHIFT, table_end << PAGE_SHIFT,
		      "PGTABLE");
}

#ifndef CONFIG_NUMA
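/*
 * Flat (non-NUMA) zone setup; with CONFIG_NUMA the NUMA code supplies
 * its own paging_init().
 */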
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = end_pfn;

	memory_present(0, 0, end_pfn);
	sparse_init();
	free_area_init_nodes(max_zone_pfns);
}
#endif

/*
 * Unmap a kernel mapping if it exists. This is useful to avoid
 * prefetches from the CPU leading to inconsistent cache lines.
 * address and size must be aligned to 2MB boundaries.
 * Does nothing when the mapping doesn't exist.
 */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			continue;

		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;

		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;

		if (!(pmd_val(*pmd) & _PAGE_PSE)) {
			/*
			 * Could handle this, but it should not happen
			 * currently:
			 */
			printk(KERN_ERR "clear_kernel_mapping: "
				"mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}

/*
 * Memory hotplug specific functions
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is added always to NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	init_memory_mapping(start, start + size-1);

	ret = __add_pages(zone, start_pfn, nr_pages);
	WARN_ON(ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
			 kcore_modules, kcore_vsyscall;

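/*
 * Late memory setup: hand the bootmem allocator's pages over to the
 * buddy allocator, account for reserved pages, and register the
 * /proc/kcore regions.
 */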
void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* clear_bss() already cleared the empty_zero_page */

	/* temporary debugging - double check it's true: */
	{
		int i;

		for (i = 0; i < 1024; i++)
			WARN_ON_ONCE(empty_zero_page[i]);
	}

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages -
					absent_pages_in_range(0, end_pfn);
	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
			 "%ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);
}

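/*
 * Return a range of init-only pages to the page allocator, poisoning
 * them first. With CONFIG_DEBUG_PAGEALLOC the pages are marked
 * not-present instead, so any late access faults.
 */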
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (begin >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
			POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

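/*
 * Write-protect the kernel's read-only data, and the kernel text too
 * unless SMP alternatives or kprobes still need to patch it.
 */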
void mark_rodata_ro(void)
{
	unsigned long start = (unsigned long)_stext, end;

#ifdef CONFIG_HOTPLUG_CPU
	/* It must still be possible to apply SMP alternatives. */
	if (num_possible_cpus() > 1)
		start = (unsigned long)_etext;
#endif

#ifdef CONFIG_KPROBES
	start = (unsigned long)__start_rodata;
#endif

	end = (unsigned long)__end_rodata;
	start = (start + PAGE_SIZE - 1) & PAGE_MASK;
	end &= PAGE_MASK;
	if (end <= start)
		return;

	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);

	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

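/*
 * Reserve a physical range in the bootmem allocator, routing it to the
 * owning node on NUMA. Ranges beyond the end of usable RAM are ignored
 * (kdump kernels hit this for firmware tables); reservations in the DMA
 * zone are accounted via set_dma_reserve().
 */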
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
#endif
	unsigned long pfn = phys >> PAGE_SHIFT;

	if (pfn >= end_pfn) {
		/*
		 * This can happen with kdump kernels when accessing
		 * firmware tables:
		 */
		if (pfn < end_pfn_map)
			return;

		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
				phys, len);
		return;
	}

	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
		dma_reserve += len / PAGE_SIZE;
		set_dma_reserve(dma_reserve);
	}
}

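/*
 * Check whether a kernel virtual address is backed by a valid page,
 * walking the page tables by hand and handling 2MB large pages.
 */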
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

/*
 * A pseudo VMA to allow ptrace access for the vsyscall page. This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static struct vm_area_struct gate_vma = {
	.vm_start	= VSYSCALL_START,
	.vm_end		= VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
	.vm_page_prot	= PAGE_READONLY_EXEC,
	.vm_flags	= VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);

	if (!vma)
		return 0;

	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives:
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

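/*
 * Name the special mappings shown in /proc/<pid>/maps: the vDSO and the
 * vsyscall page.
 */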
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
		return "[vdso]";
	if (vma == &gate_vma)
		return "[vsyscall]";
	return NULL;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
int __meminit
vmemmap_populate(struct page *start_page, unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	for (; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			pte_t entry;
			void *p;

			p = vmemmap_alloc_block(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
					PAGE_KERNEL_LARGE);
			set_pmd(pmd, __pmd(pte_val(entry)));

			printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
				addr, addr + PMD_SIZE - 1, p, node);
		} else {
			vmemmap_verify((pte_t *)pmd, node, addr, next);
		}
	}
	return 0;
}
#endif