/*
 * Copyright (c) 2000, 2003 Silicon Graphics, Inc. All rights reserved.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 * Copyright (c) 2004 Silicon Graphics, Inc
 *	Russ Anderson <rja@sgi.com>
 *	Jesse Barnes <jbarnes@sgi.com>
 *	Jack Steiner <steiner@sgi.com>
 */

/*
 * Platform initialization for Discontig Memory
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>

/*
 * Track per-node information needed to set up the boot memory allocator, the
 * per-node areas, and the real VM.
 */
struct early_node_data {
	struct ia64_node_data *node_data;
	pg_data_t *pgdat;
	unsigned long pernode_addr;
	unsigned long pernode_size;
	struct bootmem_data bootmem_data;
	unsigned long num_physpages;
	unsigned long num_dma_physpages;
	unsigned long min_pfn;
	unsigned long max_pfn;
};

static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;

/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */
#define NODEDATA_ALIGN(addr, node)	\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) + (node)*PERCPU_PAGE_SIZE)
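
/*
 * For example, assuming the usual 64KB ia64 PERCPU_PAGE_SIZE:
 * NODEDATA_ALIGN(0x2345678, 2) rounds 0x2345678 up to the next 1MB
 * boundary (0x2400000) and adds 2 * 0x10000, giving 0x2420000; each
 * node's area thus starts at a different stride so the per-node
 * structures do not alias to the same cache lines.
 */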

/**
 * build_node_maps - callback to set up bootmem structs for each node
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * We allocate a struct bootmem_data for each piece of memory that we wish to
 * treat as a virtually contiguous block (i.e. each node). Each such block
 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
 * if necessary. Any non-existent pages will simply be part of the virtual
 * memmap. We also update min_low_pfn and max_low_pfn here as we receive
 * memory ranges from the caller.
 */
static int __init build_node_maps(unsigned long start, unsigned long len,
				  int node)
{
	unsigned long cstart, epfn, end = start + len;
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
	cstart = GRANULEROUNDDOWN(start);

	if (!bdp->node_low_pfn) {
		bdp->node_boot_start = cstart;
		bdp->node_low_pfn = epfn;
	} else {
		bdp->node_boot_start = min(cstart, bdp->node_boot_start);
		bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
	}

	min_low_pfn = min(min_low_pfn, bdp->node_boot_start>>PAGE_SHIFT);
	max_low_pfn = max(max_low_pfn, bdp->node_low_pfn);

	return 0;
}

/**
 * early_nr_cpus_node - return number of cpus on a given node
 * @node: node to check
 *
 * Count the number of cpus on @node. We can't use nr_cpus_node() yet because
 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
 * called yet. Note that node 0 will also count all non-existent cpus.
 */
static int __init early_nr_cpus_node(int node)
{
	int cpu, n = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (node == node_cpuid[cpu].nid)
			n++;

	return n;
}

/**
 * compute_pernodesize - compute size of pernode data
 * @node: the node id.
 */
static unsigned long __init compute_pernodesize(int node)
{
	unsigned long pernodesize = 0, cpus;

	cpus = early_nr_cpus_node(node);
	pernodesize += PERCPU_PAGE_SIZE * cpus;
	pernodesize += node * L1_CACHE_BYTES;
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernodesize = PAGE_ALIGN(pernodesize);
	return pernodesize;
}
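
/*
 * For instance, a node with two cpus and the usual 64KB PERCPU_PAGE_SIZE
 * needs 128KB for per-cpu areas, plus node * L1_CACHE_BYTES of anti-alias
 * padding, plus an L1-aligned pg_data_t and ia64_node_data; the total is
 * then rounded up to a whole page.
 */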

/**
 * per_cpu_node_setup - set up per-cpu areas on each node
 * @cpu_data: per-cpu area on this node
 * @node: node to set up
 *
 * Copy the static per-cpu data into the region we just set aside and then
 * set up __per_cpu_offset for each CPU on this node. Return a pointer to
 * the end of the area.
 */
static void *per_cpu_node_setup(void *cpu_data, int node)
{
#ifdef CONFIG_SMP
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (node == node_cpuid[cpu].nid) {
			memcpy(__va(cpu_data), __phys_per_cpu_start,
			       __per_cpu_end - __per_cpu_start);
			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
				__per_cpu_start;
			cpu_data += PERCPU_PAGE_SIZE;
		}
	}
#endif
	return cpu_data;
}
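
/*
 * With __per_cpu_offset[] pointing into the node-local copies made above,
 * a per_cpu(var, cpu) access (in this era's generic percpu.h, roughly
 * *RELOC_HIDE(&per_cpu__var, __per_cpu_offset[cpu])) resolves to the
 * replica in that cpu's node-local per-cpu page rather than to the static
 * template in the kernel image.
 */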

/**
 * fill_pernode - initialize pernode data.
 * @node: the node id.
 * @pernode: physical address of pernode data
 * @pernodesize: size of the pernode data
 */
static void __init fill_pernode(int node, unsigned long pernode,
	unsigned long pernodesize)
{
	void *cpu_data;
	int cpus = early_nr_cpus_node(node);
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	mem_data[node].pernode_addr = pernode;
	mem_data[node].pernode_size = pernodesize;
	memset(__va(pernode), 0, pernodesize);

	cpu_data = (void *)pernode;
	pernode += PERCPU_PAGE_SIZE * cpus;
	pernode += node * L1_CACHE_BYTES;

	mem_data[node].pgdat = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	mem_data[node].node_data = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));

	mem_data[node].pgdat->bdata = bdp;
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	cpu_data = per_cpu_node_setup(cpu_data, node);

	return;
}

/**
 * find_pernode_space - allocate memory for memory map and per-node structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * This routine reserves space for the per-cpu data struct, the list of
 * pg_data_ts and the per-node data struct. Each node will have something like
 * the following in the first chunk of addr. space large enough to hold it.
 *
 *    ________________________
 *   |                        |
 *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
 *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
 *   |    cpus_on_this_node   | Node 0 will also have entries for all non-existent cpus.
 *   |------------------------|
 *   |   local pg_data_t *    |
 *   |------------------------|
 *   |  local ia64_node_data  |
 *   |------------------------|
 *   |          ???           |
 *   |________________________|
 *
 * Once this space has been set aside, the bootmem maps are initialized. We
 * could probably move the allocation of the per-cpu and ia64_node_data space
 * outside of this function and use alloc_bootmem_node(), but doing it here
 * is straightforward and we get the alignments we want so...
 */
static int __init find_pernode_space(unsigned long start, unsigned long len,
				     int node)
{
	unsigned long epfn;
	unsigned long pernodesize = 0, pernode, pages, mapsize;
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	epfn = (start + len) >> PAGE_SHIFT;

	pages = bdp->node_low_pfn - (bdp->node_boot_start >> PAGE_SHIFT);
	mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;

	/*
	 * Make sure this memory falls within this node's usable memory
	 * since we may have thrown some away in build_maps().
	 */
	if (start < bdp->node_boot_start || epfn > bdp->node_low_pfn)
		return 0;

	/* Don't set up this node's local space twice... */
	if (mem_data[node].pernode_addr)
		return 0;

	/*
	 * Calculate total size needed, incl. what's necessary
	 * for good alignment and alias prevention.
	 */
	pernodesize = compute_pernodesize(node);
	pernode = NODEDATA_ALIGN(start, node);

	/* Is this range big enough for what we want to store here? */
	if (start + len > (pernode + pernodesize + mapsize))
		fill_pernode(node, pernode, pernodesize);

	return 0;
}
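
/*
 * Note that the size check above includes mapsize: the node's bootmem map
 * is later placed directly after the pernode area (find_memory() uses
 * map = pernode + pernodesize), so the candidate range must have room for
 * both.
 */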

/**
 * free_node_bootmem - free bootmem allocator memory for use
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Simply calls the bootmem allocator to free the specified range from
 * the given pg_data_t's bdata struct. After this function has been called
 * for all the entries in the EFI memory map, the bootmem allocator will
 * be ready to service allocation requests.
 */
static int __init free_node_bootmem(unsigned long start, unsigned long len,
				    int node)
{
	free_bootmem_node(mem_data[node].pgdat, start, len);

	return 0;
}

/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
 * allocator so that when we actually create the real mem maps we don't
 * use their memory.
 */
static void __init reserve_pernode_space(void)
{
	unsigned long base, size, pages;
	struct bootmem_data *bdp;
	int node;

	for_each_online_node(node) {
		pg_data_t *pdp = mem_data[node].pgdat;

		if (node_isset(node, memory_less_mask))
			continue;

		bdp = pdp->bdata;

		/* First the bootmem_map itself */
		pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
		size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
		base = __pa(bdp->node_bootmem_map);
		reserve_bootmem_node(pdp, base, size);

		/* Now the per-node space */
		size = mem_data[node].pernode_size;
		base = __pa(mem_data[node].pernode_addr);
		reserve_bootmem_node(pdp, base, size);
	}
}

/**
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure. The active_cpus field of the per-node
 * structure gets set up by the platform_cpu_init() function later.
 */
static void __init initialize_pernode_data(void)
{
	pg_data_t *pgdat_list[MAX_NUMNODES];
	int cpu, node;

	for_each_online_node(node)
		pgdat_list[node] = mem_data[node].pgdat;

	/* Copy the pg_data_t list to each node and init the node field */
	for_each_online_node(node) {
		memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
		       sizeof(pgdat_list));
	}
#ifdef CONFIG_SMP
	/* Set the node_data pointer for each per-cpu struct */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		node = node_cpuid[cpu].nid;
		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
	}
#else
	{
		struct cpuinfo_ia64 *cpu0_cpu_info;
		cpu = 0;
		node = node_cpuid[cpu].nid;
		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
			((char *)&per_cpu__cpu_info - __per_cpu_start));
		cpu0_cpu_info->node_data = mem_data[node].node_data;
	}
#endif /* CONFIG_SMP */
}

/**
 * memory_less_node_alloc - attempt to allocate memory on the best NUMA SLIT
 *	node, but fall back to any other node when __alloc_bootmem_node()
 *	fails for the best node.
 * @nid: node id
 * @pernodesize: size of this node's pernode data
 */
static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
{
	void *ptr = NULL;
	u8 best = 0xff;
	int bestnode = -1, node, anynode = 0;

	for_each_online_node(node) {
		if (node_isset(node, memory_less_mask))
			continue;
		else if (node_distance(nid, node) < best) {
			best = node_distance(nid, node);
			bestnode = node;
		}
		anynode = node;
	}

	if (bestnode == -1)
		bestnode = anynode;

	ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat, pernodesize,
		PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));

	return ptr;
}

/**
 * pgdat_insert - insert the pgdat into global pgdat_list
 * @pgdat: the pgdat for a node.
 */
static void __init pgdat_insert(pg_data_t *pgdat)
{
	pg_data_t *prev = NULL, *next;

	for_each_pgdat(next)
		if (pgdat->node_id < next->node_id)
			break;
		else
			prev = next;

	if (prev) {
		prev->pgdat_next = pgdat;
		pgdat->pgdat_next = next;
	} else {
		pgdat->pgdat_next = pgdat_list;
		pgdat_list = pgdat;
	}

	return;
}
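
/*
 * Keeping pgdat_list sorted by node_id means a subsequent for_each_pgdat()
 * walk visits the nodes in ascending node order.
 */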

/**
 * memory_less_nodes - allocate and initialize pernode information for
 *	CPU-only (memory-less) nodes.
 */
static void __init memory_less_nodes(void)
{
	unsigned long pernodesize;
	void *pernode;
	int node;

	for_each_node_mask(node, memory_less_mask) {
		pernodesize = compute_pernodesize(node);
		pernode = memory_less_node_alloc(node, pernodesize);
		fill_pernode(node, __pa(pernode), pernodesize);
	}

	return;
}

#ifdef CONFIG_SPARSEMEM
/**
 * register_sparse_mem - notify SPARSEMEM that this memory range exists.
 * @start: physical start of range
 * @end: physical end of range
 * @arg: unused
 *
 * Simply calls SPARSEMEM to register memory section(s).
 */
static int __init register_sparse_mem(unsigned long start, unsigned long end,
	void *arg)
{
	int nid;

	start = __pa(start) >> PAGE_SHIFT;
	end = __pa(end) >> PAGE_SHIFT;
	nid = early_pfn_to_nid(start);
	memory_present(nid, start, end);

	return 0;
}

static void __init arch_sparse_init(void)
{
	efi_memmap_walk(register_sparse_mem, NULL);
	sparse_init();
}
#else
#define arch_sparse_init() do {} while (0)
#endif

/**
 * find_memory - walk the EFI memory map and set up the bootmem allocator
 *
 * Called early in boot to set up the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.
 */
void __init find_memory(void)
{
	int node;

	reserve_memory();

	if (num_online_nodes() == 0) {
		printk(KERN_ERR "node info missing!\n");
		node_set_online(0);
	}

	nodes_or(memory_less_mask, memory_less_mask, node_online_map);
	min_low_pfn = -1;
	max_low_pfn = 0;

	/* These actually end up getting called by call_pernode_memory() */
	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);

	for_each_online_node(node)
		if (mem_data[node].bootmem_data.node_low_pfn) {
			node_clear(node, memory_less_mask);
			mem_data[node].min_pfn = ~0UL;
		}
	/*
	 * Initialize the boot memory maps in reverse order since that's
	 * what the bootmem allocator expects
	 */
	for (node = MAX_NUMNODES - 1; node >= 0; node--) {
		unsigned long pernode, pernodesize, map;
		struct bootmem_data *bdp;

		if (!node_online(node))
			continue;
		else if (node_isset(node, memory_less_mask))
			continue;

		bdp = &mem_data[node].bootmem_data;
		pernode = mem_data[node].pernode_addr;
		pernodesize = mem_data[node].pernode_size;
		map = pernode + pernodesize;

		init_bootmem_node(mem_data[node].pgdat,
				  map>>PAGE_SHIFT,
				  bdp->node_boot_start>>PAGE_SHIFT,
				  bdp->node_low_pfn);
	}

	efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);

	reserve_pernode_space();
	memory_less_nodes();
	initialize_pernode_data();

	max_pfn = max_low_pfn;

	find_initrd();
}

#ifdef CONFIG_SMP
/**
 * per_cpu_init - set up per-cpu variables
 *
 * find_pernode_space() does most of this already; we just need to set
 * local_per_cpu_offset
 */
void *per_cpu_init(void)
{
	int cpu;

	if (smp_processor_id() != 0)
		return __per_cpu_start + __per_cpu_offset[smp_processor_id()];

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];

	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
#endif /* CONFIG_SMP */

/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(void)
{
	int i, total_reserved = 0;
	int total_shared = 0, total_cached = 0;
	unsigned long total_present = 0;
	pg_data_t *pgdat;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		unsigned long present;
		unsigned long flags;
		int shared = 0, cached = 0, reserved = 0;

		printk("Node ID: %d\n", pgdat->node_id);
		pgdat_resize_lock(pgdat, &flags);
		present = pgdat->node_present_pages;
		for(i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page;
			if (pfn_valid(pgdat->node_start_pfn + i))
				page = pfn_to_page(pgdat->node_start_pfn + i);
			else
				continue;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page)-1;
		}
		pgdat_resize_unlock(pgdat, &flags);
		total_present += present;
		total_reserved += reserved;
		total_cached += cached;
		total_shared += shared;
		printk("\t%ld pages of RAM\n", present);
		printk("\t%d reserved pages\n", reserved);
		printk("\t%d pages shared\n", shared);
		printk("\t%d pages swap cached\n", cached);
	}
	printk("%ld pages of RAM\n", total_present);
	printk("%d reserved pages\n", total_reserved);
	printk("%d pages shared\n", total_shared);
	printk("%d pages swap cached\n", total_cached);
	printk("Total of %ld pages in page table cache\n",
	       pgtable_quicklist_total_size());
	printk("%d free buffer pages\n", nr_free_buffer_pages());
}

/**
 * call_pernode_memory - use SRAT to call callback functions with node info
 * @start: physical start of range
 * @len: length of range
 * @arg: function to call for each range
 *
 * efi_memmap_walk() knows nothing about layout of memory across nodes. Find
 * out to which node a block of memory belongs. Ignore memory that we cannot
 * identify, and split blocks that run across multiple nodes.
 *
 * Take this opportunity to round the start address up and the end address
 * down to page boundaries.
 */
void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
{
	unsigned long rs, re, end = start + len;
	void (*func)(unsigned long, unsigned long, int);
	int i;

	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	if (start >= end)
		return;

	func = arg;

	if (!num_node_memblks) {
		/* No SRAT table, so assume one node (node 0) */
		if (start < end)
			(*func)(start, end - start, 0);
		return;
	}

	for (i = 0; i < num_node_memblks; i++) {
		rs = max(start, node_memblk[i].start_paddr);
		re = min(end, node_memblk[i].start_paddr +
			 node_memblk[i].size);

		if (rs < re)
			(*func)(rs, re - rs, node_memblk[i].nid);

		if (re == end)
			break;
	}
}
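
/*
 * For example, with a hypothetical node_memblk[] describing
 * [0x0000000, 0x4000000) on node 0 and [0x4000000, 0x8000000) on node 1,
 * a call covering [0x3000000, 0x5000000) is split into
 * func(0x3000000, 0x1000000, 0) and func(0x4000000, 0x1000000, 1).
 */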

/**
 * count_node_pages - callback to build per-node memory info structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Each node has its own number of physical pages, DMAable pages, start, and
 * end page frame number. This routine will be called by call_pernode_memory()
 * for each piece of usable memory and will set up these values for each node.
 * Very similar to build_maps().
 */
static __init int count_node_pages(unsigned long start, unsigned long len, int node)
{
	unsigned long end = start + len;

	mem_data[node].num_physpages += len >> PAGE_SHIFT;
	if (start <= __pa(MAX_DMA_ADDRESS))
		mem_data[node].num_dma_physpages +=
			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >> PAGE_SHIFT;
	start = GRANULEROUNDDOWN(start);
	start = ORDERROUNDDOWN(start);
	end = GRANULEROUNDUP(end);
	mem_data[node].max_pfn = max(mem_data[node].max_pfn,
				     end >> PAGE_SHIFT);
	mem_data[node].min_pfn = min(mem_data[node].min_pfn,
				     start >> PAGE_SHIFT);

	return 0;
}
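
/*
 * The rounding above widens each node's pfn range to IA64_GRANULE_SIZE
 * boundaries (the unit in which the virtual memmap is mapped) and, via
 * ORDERROUNDDOWN, to a MAX_ORDER buddy boundary; pfns in the rounded-out
 * holes simply become invalid entries in the node's memmap.
 */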

/**
 * paging_init - setup page tables
 *
 * paging_init() sets up the page tables for each node of the system and frees
 * the bootmem allocator memory for general use.
 */
void __init paging_init(void)
{
	unsigned long max_dma;
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	unsigned long pfn_offset = 0;
	int node;

	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	arch_sparse_init();

	efi_memmap_walk(filter_rsvd_memory, count_node_pages);

#ifdef CONFIG_VIRTUAL_MEM_MAP
	vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page));
	vmem_map = (struct page *) vmalloc_end;
	efi_memmap_walk(create_mem_map_page_table, NULL);
	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
#endif

	for_each_online_node(node) {
		memset(zones_size, 0, sizeof(zones_size));
		memset(zholes_size, 0, sizeof(zholes_size));

		num_physpages += mem_data[node].num_physpages;

		if (mem_data[node].min_pfn >= max_dma) {
			/* All of this node's memory is above ZONE_DMA */
			zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				mem_data[node].min_pfn;
			zholes_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				mem_data[node].min_pfn -
				mem_data[node].num_physpages;
		} else if (mem_data[node].max_pfn < max_dma) {
			/* All of this node's memory is in ZONE_DMA */
			zones_size[ZONE_DMA] = mem_data[node].max_pfn -
				mem_data[node].min_pfn;
			zholes_size[ZONE_DMA] = mem_data[node].max_pfn -
				mem_data[node].min_pfn -
				mem_data[node].num_dma_physpages;
		} else {
			/* This node has memory in both zones */
			zones_size[ZONE_DMA] = max_dma -
				mem_data[node].min_pfn;
			zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] -
				mem_data[node].num_dma_physpages;
			zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				max_dma;
			zholes_size[ZONE_NORMAL] = zones_size[ZONE_NORMAL] -
				(mem_data[node].num_physpages -
				 mem_data[node].num_dma_physpages);
		}

		pfn_offset = mem_data[node].min_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
#endif
		free_area_init_node(node, NODE_DATA(node), zones_size,
				    pfn_offset, zholes_size);
	}

	/*
	 * Make the memory-less nodes members of the global pgdat list.
	 */
	for_each_node_mask(node, memory_less_mask)
		pgdat_insert(mem_data[node].pgdat);

	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}